--- /dev/null
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**System (please complete the following information):**
+ - OS: [e.g. ArchLinux]
+ - Terminal [e.g. xTerm, Guake]
+ - Version [e.g. 5.22]
+
+**Additional context**
+Add any other context about the problem here.
--- /dev/null
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
--- /dev/null
+version: 2
+updates:
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "daily"
+ - package-ecosystem: "gomod"
+ directory: "/"
+ schedule:
+ interval: "daily"
--- /dev/null
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL"
+
+on:
+ push:
+ branches: [ master ]
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches: [ master ]
+ schedule:
+ - cron: '21 0 * * 3'
+
+jobs:
+ analyze:
+ name: Analyze
+ runs-on: ubuntu-latest
+
+ strategy:
+ fail-fast: false
+ matrix:
+ language: [ 'go' ]
+ # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
+ # Learn more:
+ # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v6
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v4
+ with:
+ languages: ${{ matrix.language }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+ # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+ # If this step fails, then you should remove it and run the build manually (see below)
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@v4
+
+ # ℹ️ Command-line programs to run using the OS shell.
+ # 📚 https://git.io/JvXDl
+
+ # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+ # and modify them (or add more) to build your code if your project
+ # uses a compiled language
+
+ #- run: |
+ # make bootstrap
+ # make release
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v4
--- /dev/null
+name: Docker
+
+on:
+ workflow_dispatch:
+ push:
+ branches:
+ - 'master'
+ tags:
+ - 'v*'
+
+env:
+ REGISTRY: ghcr.io
+ IMAGE_NAME: ${{ github.repository }}
+
+jobs:
+ docker:
+ runs-on: ubuntu-latest
+ steps:
+ -
+ name: Checkout
+ uses: actions/checkout@v6
+ -
+ name: Login to registry
+ uses: docker/login-action@v4
+ with:
+ registry: ${{ env.REGISTRY }}
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+ -
+ name: Docker meta
+ id: meta
+ uses: docker/metadata-action@v6
+ with:
+ images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+ -
+ name: Build and push
+ uses: docker/build-push-action@v7
+ with:
+ context: .
+ push: true
+ tags: ${{ steps.meta.outputs.tags }}
+ labels: ${{ steps.meta.outputs.labels }}
--- /dev/null
+on:
+ push:
+ branches:
+ - master
+ pull_request:
+ branches:
+ - master
+
+name: run tests
+jobs:
+ lint:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Install Go
+ uses: actions/setup-go@v6
+ with:
+ go-version: 1.25.x
+ - name: Checkout code
+ uses: actions/checkout@v6
+ - name: Run linters
+ uses: golangci/golangci-lint-action@v9
+ with:
+ version: v2.6
+
+ test:
+ strategy:
+ matrix:
+ go-version: [1.24.x, 1.25.x]
+ platform: [ubuntu-latest]
+ include:
+ - go-version: 1.25.x
+ platform: macos-latest
+ runs-on: ${{ matrix.platform }}
+ steps:
+ - name: Install Go
+ if: success()
+ uses: actions/setup-go@v6
+ with:
+ go-version: ${{ matrix.go-version }}
+ - name: Checkout code
+ uses: actions/checkout@v6
+ - name: Run tests
+ run: go test -v -covermode=count ./...
+
+ coverage:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Install Go
+ if: success()
+ uses: actions/setup-go@v6
+ with:
+ go-version: 1.25.x
+ - name: Checkout code
+ uses: actions/checkout@v6
+ - name: Calc coverage
+ run: |
+ go test -v -race -covermode=atomic -coverprofile=coverage.out ./...
+ - name: Upload coverage report
+ uses: codecov/codecov-action@v5
+ with:
+ files: ./coverage.out
+ fail_ci_if_error: true
+ verbose: true
+ token: ${{ secrets.CODECOV_TOKEN }}
--- /dev/null
+name: Publish to Winget
+on:
+ release:
+ types: [released]
+
+jobs:
+ publish:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: vedantmgoyal2009/winget-releaser@v2
+ with:
+ identifier: dundee.gdu
+ installers-regex: '_windows_[\w.]+\.zip$'
+ token: ${{ secrets.WINGET_TOKEN }}
--- /dev/null
+/.vscode
+/.idea
+/coverage.txt
+/coverage.out
+/coverage.html
+/dist
+/test_dir
+/tui/test_dir
+/vendor
--- /dev/null
+version: "2"
+output:
+ formats:
+ text:
+ path: stdout
+linters:
+ default: none
+ enable:
+ - bodyclose
+ - copyloopvar
+ - dogsled
+ - errcheck
+ - errorlint
+ - exhaustive
+ - funlen
+ - goconst
+ - gocritic
+ - gocyclo
+ - govet
+ - ineffassign
+ - lll
+ - nakedret
+ - revive
+ - staticcheck
+ - unparam
+ - unused
+ - whitespace
+ settings:
+ dupl:
+ threshold: 100
+ errcheck:
+ check-blank: true
+ funlen:
+ lines: 500
+ statements: 50
+ goconst:
+ min-len: 3
+ min-occurrences: 3
+ gocritic:
+ disabled-checks:
+ - whyNoLint
+ enabled-tags:
+ - diagnostic
+ - experimental
+ - opinionated
+ - performance
+ - style
+ gocyclo:
+ min-complexity: 25
+ govet:
+ enable:
+ - shadow
+ lll:
+ line-length: 160
+ revive:
+ rules:
+ - name: blank-imports
+ - name: context-as-argument
+ - name: context-keys-type
+ - name: dot-imports
+ - name: error-return
+ - name: error-strings
+ - name: error-naming
+ - name: exported
+ - name: increment-decrement
+ - name: var-naming
+ - name: var-declaration
+ - name: package-comments
+ - name: range
+ - name: receiver-naming
+ - name: time-naming
+ - name: unexported-return
+ - name: indent-error-flow
+ - name: errorf
+ - name: empty-block
+ - name: superfluous-else
+ - name: unreachable-code
+ - name: redefines-builtin-id
+ # While we agree with this rule, right now it would break too many
+ # projects. So, we disable it by default.
+ - name: unused-parameter
+ disabled: true
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ rules:
+ - linters:
+ - errcheck
+ - funlen
+ - gochecknoglobals # Globals in test files are tolerated.
+ - goconst # Repeated consts in test files are tolerated.
+ - gocritic
+ - gocyclo
+ - gosec
+ path: _test\.go
+ # This rule is buggy and breaks on our `///Block` lines. Disable for now.
+ - linters:
+ - gocritic
+ text: 'commentFormatting: put a space'
+ # This rule incorrectly flags nil references after assert.Assert(t, x != nil)
+ - linters:
+ - staticcheck
+ path: _test\.go
+ text: SA5011
+ - linters:
+ - lll
+ source: '^//go:generate '
+ - linters:
+ - gocritic
+ - lll
+ path: \.resolvers\.go
+ source: '^func \(r \*[a-zA-Z]+Resolvers\) '
+ - path: (.+)\.go$
+ # We allow error shadowing
+ text: declaration of "err" shadows declaration at
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ enable:
+ - gofmt
+ - goimports
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
--- /dev/null
+the .tito/packages directory contains metadata files
+named after their packages. Each file has the latest tagged
+version and the project's relative directory.
--- /dev/null
+5.25.0-1 ./
--- /dev/null
+[buildconfig]
+builder = tito.builder.Builder
+tagger = tito.tagger.VersionTagger
+changelog_do_not_remove_cherrypick = 0
+changelog_format = %s (%ae)
--- /dev/null
+golang 1.25.5
--- /dev/null
+FROM docker.io/library/golang:1.25.5 as builder
+
+WORKDIR /app
+
+COPY go.mod go.sum ./
+RUN go mod download
+
+COPY . .
+RUN make build-static
+
+FROM scratch
+
+COPY --from=builder /app/dist/gdu /opt/gdu
+
+ENTRYPOINT ["/opt/gdu"]
--- /dev/null
+# Installation
+
+[Arch Linux](https://archlinux.org/packages/extra/x86_64/gdu/):
+
+ pacman -S gdu
+
+[Debian](https://packages.debian.org/bullseye/gdu):
+
+ apt install gdu
+
+[Ubuntu](https://launchpad.net/~daniel-milde/+archive/ubuntu/gdu)
+
+ add-apt-repository ppa:daniel-milde/gdu
+ apt-get update
+ apt-get install gdu
+
+[NixOS](https://search.nixos.org/packages?channel=unstable&show=gdu&query=gdu):
+
+ nix-env -iA nixos.gdu
+
+[Homebrew](https://formulae.brew.sh/formula/gdu):
+
+ brew install -f gdu
+ # gdu will be installed as `gdu-go` to avoid conflicts with coreutils
+ gdu-go
+
+[Mise](https://github.com/jdx/mise):
+
+ mise use -g gdu@latest
+
+[Snap](https://snapcraft.io/gdu-disk-usage-analyzer):
+
+ snap install gdu-disk-usage-analyzer
+ snap connect gdu-disk-usage-analyzer:mount-observe :mount-observe
+ snap connect gdu-disk-usage-analyzer:system-backup :system-backup
+ snap alias gdu-disk-usage-analyzer.gdu gdu
+
+[Binenv](https://github.com/devops-works/binenv)
+
+ binenv install gdu
+
+[Go](https://pkg.go.dev/github.com/dundee/gdu):
+
+ go install github.com/dundee/gdu/v5/cmd/gdu@latest
+
+[Winget](https://github.com/microsoft/winget-pkgs/tree/master/manifests/d/dundee/gdu) (for Windows users):
+
+ winget install gdu
+
+You can either run it as `gdu_windows_amd64.exe` or
+* add an alias with `Doskey`.
+* add `alias gdu="gdu_windows_amd64.exe"` to your `~/.bashrc` file if using Git Bash to run it as `gdu`.
+
+You might need to restart your terminal.
+
+[Scoop](https://github.com/ScoopInstaller/Main/blob/master/bucket/gdu.json):
+
+ scoop install gdu
+
+[X-cmd](https://www.x-cmd.com/start/)
+
+ x env use gdu
+
+## [COPR builds](https://copr.fedorainfracloud.org/coprs/faramirza/gdu/)
+COPR Builds exist for the the following Linux Distros.
+
+[How to enable a CORP Repo](https://docs.pagure.org/copr.copr/how_to_enable_repo.html)
+
+Amazon Linux 2023:
+```
+[copr:copr.fedorainfracloud.org:faramirza:gdu]
+name=Copr repo for gdu owned by faramirza
+baseurl=https://download.copr.fedorainfracloud.org/results/faramirza/gdu/amazonlinux-2023-$basearch/
+type=rpm-md
+skip_if_unavailable=True
+gpgcheck=1
+gpgkey=https://download.copr.fedorainfracloud.org/results/faramirza/gdu/pubkey.gpg
+repo_gpgcheck=0
+enabled=1
+enabled_metadata=1
+```
+EPEL 7:
+```
+[copr:copr.fedorainfracloud.org:faramirza:gdu]
+name=Copr repo for gdu owned by faramirza
+baseurl=https://download.copr.fedorainfracloud.org/results/faramirza/gdu/epel-7-$basearch/
+type=rpm-md
+skip_if_unavailable=True
+gpgcheck=1
+gpgkey=https://download.copr.fedorainfracloud.org/results/faramirza/gdu/pubkey.gpg
+repo_gpgcheck=0
+enabled=1
+enabled_metadata=1
+```
+EPEL 8:
+```
+[copr:copr.fedorainfracloud.org:faramirza:gdu]
+name=Copr repo for gdu owned by faramirza
+baseurl=https://download.copr.fedorainfracloud.org/results/faramirza/gdu/epel-8-$basearch/
+type=rpm-md
+skip_if_unavailable=True
+gpgcheck=1
+gpgkey=https://download.copr.fedorainfracloud.org/results/faramirza/gdu/pubkey.gpg
+repo_gpgcheck=0
+enabled=1
+enabled_metadata=1
+```
+EPEL 9:
+```
+[copr:copr.fedorainfracloud.org:faramirza:gdu]
+name=Copr repo for gdu owned by faramirza
+baseurl=https://download.copr.fedorainfracloud.org/results/faramirza/gdu/epel-9-$basearch/
+type=rpm-md
+skip_if_unavailable=True
+gpgcheck=1
+gpgkey=https://download.copr.fedorainfracloud.org/results/faramirza/gdu/pubkey.gpg
+repo_gpgcheck=0
+enabled=1
+enabled_metadata=1
+```
+Fedora 38:
+```
+[copr:copr.fedorainfracloud.org:faramirza:gdu]
+name=Copr repo for gdu owned by faramirza
+baseurl=https://download.copr.fedorainfracloud.org/results/faramirza/gdu/fedora-$releasever-$basearch/
+type=rpm-md
+skip_if_unavailable=True
+gpgcheck=1
+gpgkey=https://download.copr.fedorainfracloud.org/results/faramirza/gdu/pubkey.gpg
+repo_gpgcheck=0
+enabled=1
+enabled_metadata=1
+```
+Fedora 39:
+```
+[copr:copr.fedorainfracloud.org:faramirza:gdu]
+name=Copr repo for gdu owned by faramirza
+baseurl=https://download.copr.fedorainfracloud.org/results/faramirza/gdu/fedora-$releasever-$basearch/
+type=rpm-md
+skip_if_unavailable=True
+gpgcheck=1
+gpgkey=https://download.copr.fedorainfracloud.org/results/faramirza/gdu/pubkey.gpg
+repo_gpgcheck=0
+enabled=1
+enabled_metadata=1
+```
--- /dev/null
+Copyright 2020-2021 Daniel Milde <daniel@milde.cz>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
--- /dev/null
+NAME := gdu
+MAJOR_VER := v5
+PACKAGE := github.com/dundee/$(NAME)/$(MAJOR_VER)
+CMD_GDU := cmd/gdu
+VERSION := $(shell git describe --tags 2>/dev/null)
+NAMEVER := $(NAME)-$(subst v,,$(VERSION))
+DATE := $(shell date +'%Y-%m-%d')
+GOBIN := go
+GOFLAGS ?= -buildmode=pie -trimpath -mod=readonly -modcacherw -pgo=default.pgo
+GOFLAGS_STATIC ?= -trimpath -mod=readonly -modcacherw -pgo=default.pgo
+LDFLAGS := -s -w -extldflags '-static' \
+ -X '$(PACKAGE)/build.Version=$(VERSION)' \
+ -X '$(PACKAGE)/build.User=$(shell id -u -n)' \
+ -X '$(PACKAGE)/build.Time=$(shell LC_ALL=en_US.UTF-8 date)'
+TAR := tar
+ifeq ($(shell uname -s),Darwin)
+ TAR := gtar # brew install gnu-tar
+endif
+
+all: clean tarball build-all build-docker man clean-uncompressed-dist shasums
+
+run:
+ go run $(PACKAGE)/$(CMD_GDU)
+
+vendor: go.mod go.sum
+ go mod vendor
+
+tarball: vendor
+ -mkdir dist
+ $(TAR) czf dist/$(NAMEVER).tgz --transform "s,^,$(NAMEVER)/," --exclude dist --exclude test_dir --exclude coverage.txt *
+
+build:
+ @echo "Version: " $(VERSION)
+ mkdir -p dist
+ GOFLAGS="$(GOFLAGS)" CGO_ENABLED=0 $(GOBIN) build -ldflags="$(LDFLAGS)" -o dist/$(NAME) $(PACKAGE)/$(CMD_GDU)
+
+build-static:
+ @echo "Version: " $(VERSION)
+ mkdir -p dist
+ GOFLAGS="$(GOFLAGS_STATIC)" CGO_ENABLED=0 $(GOBIN) build -ldflags="$(LDFLAGS)" -o dist/$(NAME) $(PACKAGE)/$(CMD_GDU)
+
+build-docker:
+ @echo "Version: " $(VERSION)
+ docker build . --tag ghcr.io/dundee/gdu:$(VERSION)
+
+build-all:
+ @echo "Version: " $(VERSION)
+ -mkdir dist
+ -CGO_ENABLED=0 gox \
+ -os="darwin" \
+ -arch="amd64 arm64" \
+ -output="dist/gdu_{{.OS}}_{{.Arch}}" \
+ -ldflags="$(LDFLAGS)" \
+ $(PACKAGE)/$(CMD_GDU)
+
+ -CGO_ENABLED=0 gox \
+ -os="windows" \
+ -arch="amd64" \
+ -output="dist/gdu_{{.OS}}_{{.Arch}}" \
+ -ldflags="$(LDFLAGS)" \
+ $(PACKAGE)/$(CMD_GDU)
+
+ -CGO_ENABLED=0 gox \
+ -os="linux freebsd netbsd openbsd" \
+ -output="dist/gdu_{{.OS}}_{{.Arch}}" \
+ -ldflags="$(LDFLAGS)" \
+ $(PACKAGE)/$(CMD_GDU)
+
+ GOFLAGS="$(GOFLAGS)" CGO_ENABLED=0 GOOS=linux GOARCH=amd64 $(GOBIN) build -ldflags="$(LDFLAGS)" -o dist/gdu_linux_amd64 $(PACKAGE)/$(CMD_GDU)
+ GOFLAGS="$(GOFLAGS_STATIC)" CGO_ENABLED=0 GOOS=linux GOARCH=amd64 $(GOBIN) build -ldflags="$(LDFLAGS)" -o dist/gdu_linux_amd64_static $(PACKAGE)/$(CMD_GDU)
+
+ CGO_ENABLED=0 GOOS=linux GOARM=5 GOARCH=arm $(GOBIN) build -ldflags="$(LDFLAGS)" -o dist/gdu_linux_armv5l $(PACKAGE)/$(CMD_GDU)
+ CGO_ENABLED=0 GOOS=linux GOARM=6 GOARCH=arm $(GOBIN) build -ldflags="$(LDFLAGS)" -o dist/gdu_linux_armv6l $(PACKAGE)/$(CMD_GDU)
+ CGO_ENABLED=0 GOOS=linux GOARM=7 GOARCH=arm $(GOBIN) build -ldflags="$(LDFLAGS)" -o dist/gdu_linux_armv7l $(PACKAGE)/$(CMD_GDU)
+ CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(GOBIN) build -ldflags="$(LDFLAGS)" -o dist/gdu_linux_arm64 $(PACKAGE)/$(CMD_GDU)
+ CGO_ENABLED=0 GOOS=android GOARCH=arm64 $(GOBIN) build -ldflags="$(LDFLAGS)" -o dist/gdu_android_arm64 $(PACKAGE)/$(CMD_GDU)
+
+ cd dist; for file in gdu_linux_* gdu_darwin_* gdu_netbsd_* gdu_openbsd_* gdu_freebsd_* gdu_android_*; do tar czf $$file.tgz $$file; done
+ cd dist; for file in gdu_windows_*; do zip $$file.zip $$file; done
+
+gdu.1: gdu.1.md
+ sed 's/{{date}}/$(DATE)/g' gdu.1.md > gdu.1.date.md
+ pandoc gdu.1.date.md -s -t man > gdu.1
+ rm -f gdu.1.date.md
+
+man: gdu.1
+ cp gdu.1 dist
+ cd dist; tar czf gdu.1.tgz gdu.1
+
+show-man:
+ sed 's/{{date}}/$(DATE)/g' gdu.1.md > gdu.1.date.md
+ pandoc gdu.1.date.md -s -t man | man -l -
+
+test:
+ gotestsum
+
+coverage:
+ gotestsum -- -race -coverprofile=coverage.txt -covermode=atomic ./...
+
+coverage-html: coverage
+ $(GOBIN) tool cover -html=coverage.txt
+
+gobench:
+ $(GOBIN) test -bench=. $(PACKAGE)/pkg/analyze
+
+heap-profile:
+ $(GOBIN) tool pprof -web http://localhost:6060/debug/pprof/heap
+
+pgo:
+ wget -O cpu.pprof http://localhost:6060/debug/pprof/profile?seconds=30
+ $(GOBIN) tool pprof -proto cpu.pprof default.pgo > merged.pprof
+ mv merged.pprof default.pgo
+
+trace:
+ wget -O trace.out http://localhost:6060/debug/pprof/trace?seconds=30
+ gotraceui ./trace.out
+
+profile:
+ wget -O cpu.pprof http://localhost:6060/debug/pprof/profile?seconds=30
+ $(GOBIN) tool pprof -web cpu.pprof
+
+benchmark:
+ sudo cpupower frequency-set -g performance
+ hyperfine --export-markdown=bench-cold.md \
+ --prepare 'sync; echo 3 | sudo tee /proc/sys/vm/drop_caches' \
+ --ignore-failure \
+ 'dua ~' 'duc index ~' 'ncdu -0 -o /dev/null ~' \
+ 'diskus ~' 'du -hs ~' 'dust -d0 ~' 'pdu ~' \
+ 'gdu -npc ~' 'gdu -gnpc ~' 'gdu -npc --use-storage ~'
+ hyperfine --export-markdown=bench-warm.md \
+ --warmup 5 \
+ --ignore-failure \
+ 'dua ~' 'duc index ~' 'ncdu -0 -o /dev/null ~' \
+ 'diskus ~' 'du -hs ~' 'dust -d0 ~' 'pdu ~' \
+ 'gdu -npc ~' 'gdu -gnpc ~' 'gdu -npc --use-storage ~'
+ sudo cpupower frequency-set -g schedutil
+
+lint:
+ golangci-lint run -c .golangci.yml
+
+clean:
+ $(GOBIN) mod tidy
+ -rm coverage.txt
+ -rm -r test_dir
+ -rm -r vendor
+ -rm -r dist
+
+clean-uncompressed-dist:
+ find dist -type f -not -name '*.tgz' -not -name '*.zip' -delete
+
+shasums:
+ cd dist; sha256sum * > sha256sums.txt
+ cd dist; gpg --sign --armor --detach-sign sha256sums.txt
+
+release:
+ gh release create -t "gdu $(VERSION)" $(VERSION) ./dist/*
+
+install-dev-dependencies:
+ $(GOBIN) install gotest.tools/gotestsum@latest
+ $(GOBIN) install github.com/mitchellh/gox@latest
+ $(GOBIN) install honnef.co/go/gotraceui/cmd/gotraceui@latest
+ $(GOBIN) install github.com/golangci/golangci-lint/cmd/golangci-lint@2.8.0
+
+.PHONY: run build build-static build-all test gobench benchmark coverage coverage-html clean clean-uncompressed-dist man show-man release dev-build
--- /dev/null
+# go DiskUsage()
+
+<img src="./gdu.png" alt="Gdu " width="200" align="right">
+
+[](https://codecov.io/gh/dundee/gdu)
+[](https://goreportcard.com/report/github.com/dundee/gdu)
+[](https://codeclimate.com/github/dundee/gdu/maintainability)
+[](https://codescene.io/projects/13129)
+
+Pretty fast disk usage analyzer written in Go.
+
+Gdu is intended primarily for SSD disks where it can fully utilize parallel processing.
+However HDDs work as well, but the performance gain is not so huge.
+
+[](https://asciinema.org/a/382738)
+
+<a href="https://repology.org/project/gdu/versions">
+ <img src="https://repology.org/badge/vertical-allrepos/gdu.svg" alt="Packaging status" align="right">
+</a>
+
+## Installation
+
+Head for the [releases page](https://github.com/dundee/gdu/releases) and download the binary for your system.
+
+Using curl:
+
+ curl -L https://github.com/dundee/gdu/releases/latest/download/gdu_linux_amd64.tgz | tar xz
+ chmod +x gdu_linux_amd64
+ mv gdu_linux_amd64 /usr/bin/gdu
+
+See the [installation page](./INSTALL.md) for other ways how to install Gdu to your system.
+
+Or you can use Gdu directly via Docker:
+
+ docker run --rm --init --interactive --tty --privileged --volume /:/mnt/root ghcr.io/dundee/gdu /mnt/root
+
+## Usage
+
+```
+ gdu [directory_to_scan] [flags]
+
+Flags:
+ --archive-browsing Enable browsing of zip/jar archives
+ --collapse-path Collapse single-child directory chains
+ --config-file string Read config from file (default is $HOME/.gdu.yaml)
+ -D, --db string Store analysis in database (*.sqlite for SQLite, *.badger for BadgerDB)
+ --depth int Show directory structure up to specified depth in non-interactive mode (0 means the flag is ignored)
+ --enable-profiling Enable collection of profiling data and provide it on http://localhost:6060/debug/pprof/
+ -E, --exclude-type strings File types to exclude (e.g., --exclude-type yaml,json)
+ -L, --follow-symlinks Follow symlinks for files, i.e. show the size of the file to which symlink points to (symlinks to directories are not followed)
+ -h, --help help for gdu
+ -i, --ignore-dirs strings Paths to ignore (separated by comma). Can be absolute or relative to current directory (default [/proc,/dev,/sys,/run])
+ -I, --ignore-dirs-pattern strings Path patterns to ignore (separated by comma)
+ -X, --ignore-from string Read path patterns to ignore from file
+ -f, --input-file string Import analysis from JSON file
+ -l, --log-file string Path to a logfile (default "/dev/null")
+ --max-age string Include files with mtime no older than DURATION (e.g., 7d, 2h30m, 1y2mo)
+ -m, --max-cores int Set max cores that Gdu will use. 8 cores available (default 8)
+ --min-age string Include files with mtime at least DURATION old (e.g., 30d, 1w)
+ --mouse Use mouse
+ -c, --no-color Do not use colorized output
+ -x, --no-cross Do not cross filesystem boundaries
+ --no-delete Do not allow deletions
+ --no-view-file Do not allow viewing file contents
+ -H, --no-hidden Ignore hidden directories (beginning with dot)
+ --no-prefix Show sizes as raw numbers without any prefixes (SI or binary) in non-interactive mode
+ -p, --no-progress Do not show progress in non-interactive mode
+ --no-spawn-shell Do not allow spawning shell
+ -u, --no-unicode Do not use Unicode symbols (for size bar)
+ -n, --non-interactive Do not run in interactive mode
+ -o, --output-file string Export all info into file as JSON
+ -r, --read-from-storage Use existing database instead of re-scanning
+ --reverse-sort Reverse sorting order (smallest to largest) in non-interactive mode
+ --sequential Use sequential scanning (intended for rotating HDDs)
+ -A, --show-annexed-size Use apparent size of git-annex'ed files in case files are not present locally (real usage is zero)
+ -a, --show-apparent-size Show apparent size
+ -d, --show-disks Show all mounted disks
+ -k, --show-in-kib Show sizes in KiB (or kB with --si) in non-interactive mode
+ -C, --show-item-count Show number of items in directory
+ -M, --show-mtime Show latest mtime of items in directory
+ -B, --show-relative-size Show relative size
+ --si Show sizes with decimal SI prefixes (kB, MB, GB) instead of binary prefixes (KiB, MiB, GiB)
+ --since string Include files with mtime >= WHEN. WHEN accepts RFC3339 timestamp (e.g., 2025-08-11T01:00:00-07:00) or date only YYYY-MM-DD (calendar-day compare; includes the whole day)
+ -s, --summarize Show only a total in non-interactive mode
+ -t, --top int Show only top X largest files in non-interactive mode
+ -T, --type strings File types to include (e.g., --type yaml,json)
+ --until string Include files with mtime <= WHEN. WHEN accepts RFC3339 timestamp or date only YYYY-MM-DD
+ -v, --version Print version
+ --write-config Write current configuration to file (default is $HOME/.gdu.yaml)
+
+Basic list of actions in interactive mode (show help modal for more):
+ ↑ or k Move cursor up
+ ↓ or j Move cursor down
+ → or Enter or l Go to highlighted directory
+ ← or h Go to parent directory
+ d Delete the selected file or directory
+ e Empty the selected directory
+ n Sort by name
+ s Sort by size
+ c Show number of items in directory
+ ? Show help modal
+```
+
+## Examples
+
+ gdu # analyze current dir
+ gdu -a # show apparent size instead of disk usage
+ gdu --no-delete # prevent write operations
+ gdu --no-view-file # prevent viewing file contents
+ gdu <some_dir_to_analyze> # analyze given dir
+ gdu -d # show all mounted disks
+ gdu -l ./gdu.log <some_dir> # write errors to log file
+ gdu -i /sys,/proc / # ignore some paths
+ gdu -I '.*[abc]+' # ignore paths by regular pattern
+ gdu -X ignore_file / # ignore paths by regular patterns from file
+ gdu -c / # use only white/gray/black colors
+
+ gdu -n / # only print stats, do not start interactive mode
+ gdu -p / # do not show progress, useful when using its output in a script
+ gdu -ps /some/dir # show only total usage for given dir
+ gdu -t 10 / # show top 10 largest files
+ gdu --reverse-sort -n / # show files sorted from smallest to largest in non-interactive mode
+ gdu / > file # write stats to file, do not start interactive mode
+
+ gdu -o- / | gzip -c >report.json.gz # write all info to JSON file for later analysis
+ zcat report.json.gz | gdu -f- # read analysis from file
+
+ GOGC=10 gdu -g --use-storage / # use persistent key-value storage for saving analysis data
+ gdu -r / # read saved analysis data from persistent key-value storage
+
+## Modes
+
+Gdu has three modes: interactive (default), non-interactive and export.
+
+Non-interactive mode is started automatically when TTY is not detected (using [go-isatty](https://github.com/mattn/go-isatty)), for example if the output is being piped to a file, or it can be started explicitly by using a flag.
+
+Export mode (flag `-o`) outputs all usage data as JSON, which can be later opened using the `-f` flag.
+
+Hard links are counted only once.
+
+## File flags
+
+Files and directories may be prefixed by a one-character
+flag with following meaning:
+
+* `!` An error occurred while reading this directory.
+
+* `.` An error occurred while reading a subdirectory, size may be not correct.
+
+* `@` File is symlink or socket.
+
+* `H` Same file was already counted (hard link).
+
+* `e` Directory is empty.
+
+## Configuration file
+
+Gdu can read (and write) YAML configuration file.
+
+`$HOME/.config/gdu/gdu.yaml` and `$HOME/.gdu.yaml` are checked for the presence of the config file by default.
+
+See the [full list of all configuration options](configuration.md).
+
+### Examples
+
+* To configure gdu to permanently run in gray-scale color mode:
+
+```
+echo "no-color: true" >> ~/.gdu.yaml
+```
+
+* To set default sorting in configuration file:
+
+```
+sorting:
+ by: name // size, name, itemCount, mtime
+ order: desc
+```
+
+* To configure gdu to set CWD variable when browsing directories:
+
+```
+echo "change-cwd: true" >> ~/.gdu.yaml
+```
+
+* To save the current configuration
+
+```
+gdu --write-config
+```
+
+## Styling
+
+There are wide options for how terminals can be colored.
+Some gdu primitives (like basic text) adapt to different color schemas, but the selected/highlighted row does not.
+
+If the default look is not sufficient, it can be changed in configuration file, e.g.:
+
+```
+style:
+ selected-row:
+ text-color: black
+ background-color: "#ff0000"
+```
+
+## Deletion in background and in parallel (experimental)
+
+Gdu can delete items in the background, thus not blocking the UI for additional work.
+To enable:
+
+```
+echo "delete-in-background: true" >> ~/.gdu.yaml
+```
+
+Directory items can be also deleted in parallel, which might increase the speed of deletion.
+To enable:
+
+```
+echo "delete-in-parallel: true" >> ~/.gdu.yaml
+```
+
+## Saving analysis data to database
+
+Gdu can store the analysis data to a database file instead of just memory.
+This allows you to save and reload analysis results later.
+Both SQLite and BadgerDB are supported.
+
+```
+gdu --db analysis.sqlite / # saves analysis data to SQLite database
+gdu --db analysis.badger / # saves analysis data to BadgerDB
+gdu -r --db analysis.sqlite / # reads saved data, does not run analysis again
+```
+
+## Running tests
+
+ make install-dev-dependencies
+ make test
+
+## Profiling
+
+Gdu can collect profiling data when the `--enable-profiling` flag is set.
+The data are provided via embedded http server on URL `http://localhost:6060/debug/pprof/`.
+
+You can then use e.g. `go tool pprof -web http://localhost:6060/debug/pprof/heap`
+to open the heap profile as SVG image in your web browser.
+
+## Benchmarks
+
+Benchmarks were performed on 50G directory (100k directories, 400k files) on 500 GB SSD using [hyperfine](https://github.com/sharkdp/hyperfine).
+See `benchmark` target in [Makefile](Makefile) for more info.
+
+### Cold cache
+
+Filesystem cache was cleared using `sync; echo 3 | sudo tee /proc/sys/vm/drop_caches`.
+
+| Command | Mean [s] | Min [s] | Max [s] | Relative |
+|:---|---:|---:|---:|---:|
+| `diskus ~` | 3.074 ± 0.010 | 3.056 | 3.094 | 1.00 |
+| `gdu -npc ~` | 3.133 ± 0.013 | 3.116 | 3.159 | 1.02 ± 0.01 |
+| `gdu -gnpc ~` | 3.157 ± 0.013 | 3.139 | 3.180 | 1.03 ± 0.01 |
+| `pdu ~` | 3.772 ± 0.149 | 3.630 | 4.071 | 1.23 ± 0.05 |
+| `dust -d0 ~` | 4.001 ± 0.162 | 3.786 | 4.305 | 1.30 ± 0.05 |
+| `dua ~` | 5.315 ± 3.210 | 4.068 | 14.447 | 1.73 ± 1.04 |
+| `gdu -npc --use-storage ~` | 12.690 ± 0.527 | 11.325 | 13.091 | 4.13 ± 0.17 |
+| `du -hs ~` | 14.940 ± 0.064 | 14.852 | 15.048 | 4.86 ± 0.03 |
+| `duc index ~` | 15.501 ± 0.136 | 15.386 | 15.849 | 5.04 ± 0.05 |
+| `ncdu -0 -o /dev/null ~` | 15.688 ± 0.053 | 15.610 | 15.789 | 5.10 ± 0.02 |
+
+### Warm cache
+
+| Command | Mean [ms] | Min [ms] | Max [ms] | Relative |
+|:---|---:|---:|---:|---:|
+| `diskus ~` | 211.4 ± 3.7 | 206.4 | 219.3 | 1.00 |
+| `pdu ~` | 221.8 ± 2.4 | 219.3 | 226.3 | 1.05 ± 0.02 |
+| `dust -d0 ~` | 363.6 ± 5.4 | 357.3 | 373.2 | 1.72 ± 0.04 |
+| `gdu -npc ~` | 434.3 ± 3.4 | 426.0 | 437.8 | 2.05 ± 0.04 |
+| `dua ~` | 451.2 ± 4.2 | 444.9 | 457.9 | 2.13 ± 0.04 |
+| `gdu -gnpc ~` | 521.0 ± 14.0 | 510.9 | 558.5 | 2.46 ± 0.08 |
+| `du -hs ~` | 809.4 ± 3.2 | 804.8 | 816.0 | 3.83 ± 0.07 |
+| `duc index ~` | 952.3 ± 4.8 | 946.0 | 961.7 | 4.50 ± 0.08 |
+| `ncdu -0 -o /dev/null ~` | 1432.8 ± 3.4 | 1428.0 | 1439.0 | 6.78 ± 0.12 |
+| `gdu -npc --use-storage ~` | 9950.0 ± 474.1 | 9117.5 | 10647.4 | 47.07 ± 2.39 |
+
+## Alternatives
+
+* [ncdu](https://dev.yorhel.nl/ncdu) - NCurses based tool written in pure `C` (LTS) or `zig` (Stable)
+* [godu](https://github.com/viktomas/godu) - Analyzer with a carousel like user interface
+* [dua](https://github.com/Byron/dua-cli) - Tool written in `Rust` with interface similar to gdu (and ncdu)
+* [diskus](https://github.com/sharkdp/diskus) - Very simple but very fast tool written in `Rust`
+* [duc](https://duc.zevv.nl/) - Collection of tools with many possibilities for inspecting and visualising disk usage
+* [dust](https://github.com/bootandy/dust) - Tool written in `Rust` showing tree like structures of disk usage
+* [pdu](https://github.com/KSXGitHub/parallel-disk-usage) - Tool written in `Rust` showing tree like structures of disk usage
+
+## Notes
+
+[HDD icon created by Nikita Golubev - Flaticon](https://www.flaticon.com/free-icons/hdd)
--- /dev/null
+package build
+
+// Version stores the current version of the app
+var Version = "development"
+
+// Time of the build
+var Time string
+
+// User who built it
+var User string
+
+// RootPathPrefix stores path to be prepended to given absolute path
+// e.g. /var/lib/snapd/hostfs for snap
+var RootPathPrefix = ""
--- /dev/null
+package app
+
+import (
+ "fmt"
+ "io"
+ "io/fs"
+ "net/http"
+ "net/http/pprof"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+ log "github.com/sirupsen/logrus"
+
+ "github.com/dundee/gdu/v5/build"
+ "github.com/dundee/gdu/v5/internal/common"
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/dundee/gdu/v5/pkg/device"
+ gfs "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/dundee/gdu/v5/pkg/timefilter"
+ "github.com/dundee/gdu/v5/report"
+ "github.com/dundee/gdu/v5/stdout"
+ "github.com/dundee/gdu/v5/tui"
+)
+
+// UI is common interface for both terminal UI and text output
+type UI interface {
+ ListDevices(getter device.DevicesInfoGetter) error
+ AnalyzePath(path string, parentDir gfs.Item) error
+ ReadAnalysis(input io.Reader) error
+ ReadFromStorage(storagePath, path string) error
+ SetIgnoreTypes(types []string)
+ SetIgnoreDirPaths(paths []string)
+ SetIgnoreDirPatterns(paths []string) error
+ SetIgnoreFromFile(ignoreFile string) error
+ SetIgnoreHidden(value bool)
+ SetIncludeTypes(types []string)
+ SetFollowSymlinks(value bool)
+ SetShowAnnexedSize(value bool)
+ SetAnalyzer(analyzer common.Analyzer)
+ SetTimeFilter(timeFilter common.TimeFilter)
+ SetArchiveBrowsing(value bool)
+ SetCollapsePath(value bool)
+ StartUILoop() error
+}
+
+// Flags define flags accepted by Run
+type Flags struct {
+ Style Style `yaml:"style"`
+ Sorting Sorting `yaml:"sorting"`
+ CfgFile string `yaml:"-"`
+ LogFile string `yaml:"log-file"`
+ InputFile string `yaml:"input-file"`
+ OutputFile string `yaml:"output-file"`
+ IgnoreFromFile string `yaml:"ignore-from-file"`
+ IgnoreDirs []string `yaml:"ignore-dirs"`
+ IgnoreDirPatterns []string `yaml:"ignore-dir-patterns"`
+ TypeFilter []string `yaml:"type"`
+ ExcludeTypeFilter []string `yaml:"exclude-type"`
+ MaxCores int `yaml:"max-cores"`
+ Top int `yaml:"top"`
+ Depth int `yaml:"depth"`
+ SequentialScanning bool `yaml:"sequential-scanning"`
+ ShowDisks bool `yaml:"-"`
+ ShowApparentSize bool `yaml:"show-apparent-size"`
+ ShowRelativeSize bool `yaml:"show-relative-size"`
+ ShowAnnexedSize bool `yaml:"show-annexed-size"`
+ ShowVersion bool `yaml:"-"`
+ ShowItemCount bool `yaml:"show-item-count"`
+ ShowMTime bool `yaml:"show-mtime"`
+ NoColor bool `yaml:"no-color"`
+ Mouse bool `yaml:"mouse"`
+ NonInteractive bool `yaml:"non-interactive"`
+ NoProgress bool `yaml:"no-progress"`
+ NoUnicode bool `yaml:"no-unicode"`
+ NoCross bool `yaml:"no-cross"`
+ NoHidden bool `yaml:"no-hidden"`
+ NoDelete bool `yaml:"no-delete"`
+ NoViewFile bool `yaml:"no-view-file"`
+ NoSpawnShell bool `yaml:"no-spawn-shell"`
+ FollowSymlinks bool `yaml:"follow-symlinks"`
+ Profiling bool `yaml:"profiling"`
+ ReadFromStorage bool `yaml:"read-from-storage"`
+ DbPath string `yaml:"db"`
+ Summarize bool `yaml:"summarize"`
+ UseSIPrefix bool `yaml:"use-si-prefix"`
+ NoPrefix bool `yaml:"no-prefix"`
+ ShowInKiB bool `yaml:"show-in-kib"`
+ WriteConfig bool `yaml:"-"`
+ ReverseSort bool `yaml:"reverse-sort"`
+ ChangeCwd bool `yaml:"change-cwd"`
+ DeleteInBackground bool `yaml:"delete-in-background"`
+ DeleteInParallel bool `yaml:"delete-in-parallel"`
+ Since string `yaml:"since"`
+ Until string `yaml:"until"`
+ MaxAge string `yaml:"max-age"`
+ MinAge string `yaml:"min-age"`
+ ArchiveBrowsing bool `yaml:"archive-browsing"`
+ CollapsePath bool `yaml:"collapse-path"`
+ BrowseParentDirs bool `yaml:"browse-parent-dirs"`
+}
+
+// ShouldRunInNonInteractiveMode checks if the application should run in non-interactive mode
+// based on the flags set.
+func (f *Flags) ShouldRunInNonInteractiveMode(istty bool) bool {
+ return !istty ||
+ f.ShowVersion ||
+ f.NonInteractive ||
+ f.OutputFile != "" ||
+ f.NoPrefix ||
+ f.NoProgress ||
+ f.Summarize ||
+ f.Top > 0
+}
+
+// Style define style config
+type Style struct {
+ Footer FooterColorStyle `yaml:"footer"`
+ SelectedRow ColorStyle `yaml:"selected-row"`
+ ResultRow ResultRowColorStyle `yaml:"result-row"`
+ Header HeaderColorStyle `yaml:"header"`
+ ProgressModal ProgressModalOpts `yaml:"progress-modal"`
+ UseOldSizeBar bool `yaml:"use-old-size-bar"`
+}
+
+// ProgressModalOpts defines options for progress modal
+type ProgressModalOpts struct {
+ CurrentItemNameMaxLen int `yaml:"current-item-path-max-len"`
+}
+
+// ColorStyle defines styling of some item
+type ColorStyle struct {
+ TextColor string `yaml:"text-color"`
+ BackgroundColor string `yaml:"background-color"`
+}
+
+// FooterColorStyle defines styling of footer
+type FooterColorStyle struct {
+ TextColor string `yaml:"text-color"`
+ BackgroundColor string `yaml:"background-color"`
+ NumberColor string `yaml:"number-color"`
+}
+
+// HeaderColorStyle defines styling of header
+type HeaderColorStyle struct {
+ TextColor string `yaml:"text-color"`
+ BackgroundColor string `yaml:"background-color"`
+ Hidden bool `yaml:"hidden"`
+}
+
+// ResultRowColorStyle defines styling of result row
+type ResultRowColorStyle struct {
+ NumberColor string `yaml:"number-color"`
+ DirectoryColor string `yaml:"directory-color"`
+}
+
+// Sorting defines default sorting of items
+type Sorting struct {
+ By string `yaml:"by"`
+ Order string `yaml:"order"`
+}
+
+// App defines the main application
+type App struct {
+ Writer io.Writer
+ TermApp common.TermApplication
+ Screen tcell.Screen
+ Getter device.DevicesInfoGetter
+ Flags *Flags
+ PathChecker func(string) (fs.FileInfo, error)
+ Args []string
+ Istty bool
+}
+
+func init() {
+ http.DefaultServeMux = http.NewServeMux()
+}
+
+// Run starts gdu main logic
+//
+//nolint:gocyclo,funlen // App function is a suite of if statements
+func (a *App) Run() error {
+ var ui UI
+
+ if a.Flags.ShowVersion {
+ fmt.Fprintln(a.Writer, "Version:\t", build.Version)
+ fmt.Fprintln(a.Writer, "Built time:\t", build.Time)
+ fmt.Fprintln(a.Writer, "Built user:\t", build.User)
+ return nil
+ }
+
+ log.Printf("Runtime flags: %+v", *a.Flags)
+
+ if a.Flags.NoPrefix && a.Flags.UseSIPrefix {
+ return fmt.Errorf("--no-prefix and --si cannot be used at once")
+ }
+
+ path := a.getPath()
+ path, err := filepath.Abs(path)
+ if err != nil {
+ return err
+ }
+
+ ui, err = a.createUI()
+ if err != nil {
+ return err
+ }
+
+ if a.Flags.DbPath != "" {
+ if !a.Flags.ReadFromStorage {
+ // Remove existing db before re-scan
+ if strings.HasSuffix(a.Flags.DbPath, ".badger") {
+ os.RemoveAll(a.Flags.DbPath)
+ } else {
+ os.Remove(a.Flags.DbPath)
+ }
+ }
+ if strings.HasSuffix(a.Flags.DbPath, ".badger") {
+ ui.SetAnalyzer(analyze.CreateStoredAnalyzer(a.Flags.DbPath))
+ } else {
+ sqliteAnalyzer, err := analyze.CreateSqliteAnalyzer(a.Flags.DbPath)
+ if err != nil {
+ return fmt.Errorf("creating sqlite analyzer: %w", err)
+ }
+ ui.SetAnalyzer(sqliteAnalyzer)
+ }
+ }
+ if a.Flags.SequentialScanning {
+ ui.SetAnalyzer(analyze.CreateSeqAnalyzer())
+ }
+ if a.Flags.FollowSymlinks {
+ ui.SetFollowSymlinks(true)
+ }
+ if a.Flags.ShowAnnexedSize {
+ ui.SetShowAnnexedSize(true)
+ }
+ if a.Flags.ArchiveBrowsing {
+ ui.SetArchiveBrowsing(true)
+ }
+ if a.Flags.CollapsePath {
+ ui.SetCollapsePath(true)
+ }
+
+ // Set up time filter if any time flags are provided
+ if a.Flags.Since != "" || a.Flags.Until != "" || a.Flags.MaxAge != "" || a.Flags.MinAge != "" {
+ if err := a.setTimeFilters(ui); err != nil {
+ return err
+ }
+ }
+ if err := a.setNoCross(path); err != nil {
+ return err
+ }
+
+ // Process type filters
+ if len(a.Flags.TypeFilter) > 0 {
+ ui.SetIncludeTypes(a.Flags.TypeFilter)
+ }
+ if len(a.Flags.ExcludeTypeFilter) > 0 {
+ ui.SetIgnoreTypes(a.Flags.ExcludeTypeFilter)
+ }
+
+ ui.SetIgnoreDirPaths(a.Flags.IgnoreDirs)
+
+ if len(a.Flags.IgnoreDirPatterns) > 0 {
+ if err := ui.SetIgnoreDirPatterns(a.Flags.IgnoreDirPatterns); err != nil {
+ return err
+ }
+ }
+
+ if a.Flags.IgnoreFromFile != "" {
+ if err := ui.SetIgnoreFromFile(a.Flags.IgnoreFromFile); err != nil {
+ return err
+ }
+ }
+
+ if a.Flags.NoHidden {
+ ui.SetIgnoreHidden(true)
+ }
+
+ a.setMaxProcs()
+
+ if err := a.runAction(ui, path); err != nil {
+ return err
+ }
+
+ return ui.StartUILoop()
+}
+
+func (a *App) getPath() string {
+ if len(a.Args) == 1 {
+ return a.Args[0]
+ }
+ return "."
+}
+
+func (a *App) setMaxProcs() {
+ if a.Flags.MaxCores < 1 || a.Flags.MaxCores > runtime.NumCPU() {
+ return
+ }
+
+ runtime.GOMAXPROCS(a.Flags.MaxCores)
+
+ // runtime.GOMAXPROCS(n) with n < 1 doesn't change current setting so we use it to check current value
+ log.Printf("Max cores set to %d", runtime.GOMAXPROCS(0))
+}
+
+func (a *App) setTimeFilters(ui UI) error {
+ loc := time.Local
+ now := time.Now()
+
+ timeFilter, err := timefilter.NewTimeFilter(
+ a.Flags.Since,
+ a.Flags.Until,
+ a.Flags.MaxAge,
+ a.Flags.MinAge,
+ now,
+ loc,
+ )
+ if err != nil {
+ return fmt.Errorf("invalid time filter: %w", err)
+ }
+
+ if !timeFilter.IsEmpty() {
+ timeFilterFunc := func(mtime time.Time) bool {
+ return timeFilter.IncludeByTimeFilter(mtime, loc)
+ }
+ ui.SetTimeFilter(timeFilterFunc)
+
+ // If this is a TUI, also set the filter info for display
+ if tuiUI, ok := ui.(*tui.UI); ok {
+ tuiUI.SetTimeFilterWithInfo(timeFilter, loc)
+ }
+ }
+ return nil
+}
+
+func (a *App) createUI() (UI, error) {
+ var ui UI
+ var err error
+
+ switch {
+ case a.Flags.OutputFile != "":
+ var output io.Writer
+ if a.Flags.OutputFile == "-" {
+ output = os.Stdout
+ } else {
+ output, err = os.OpenFile(a.Flags.OutputFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
+ if err != nil {
+ return nil, fmt.Errorf("opening output file: %w", err)
+ }
+ }
+ ui = report.CreateExportUI(
+ a.Writer,
+ output,
+ !a.Flags.NoColor && a.Istty,
+ !a.Flags.NoProgress && a.Istty,
+ a.Flags.UseSIPrefix,
+ )
+ case a.Flags.ShouldRunInNonInteractiveMode(a.Istty):
+ fixedUnit := ""
+ if a.Flags.ShowInKiB {
+ fixedUnit = "k"
+ }
+ stdoutUI := stdout.CreateStdoutUI(
+ a.Writer,
+ !a.Flags.NoColor && a.Istty,
+ !a.Flags.NoProgress && a.Istty,
+ a.Flags.ShowApparentSize,
+ a.Flags.ShowRelativeSize,
+ a.Flags.Summarize,
+ a.Flags.UseSIPrefix,
+ a.Flags.NoPrefix,
+ fixedUnit,
+ a.Flags.Top,
+ a.Flags.ReverseSort,
+ a.Flags.Depth,
+ )
+ if a.Flags.NoUnicode {
+ stdoutUI.UseOldProgressRunes()
+ }
+ if a.Flags.ShowItemCount {
+ stdoutUI.SetShowItemCount()
+ }
+ ui = stdoutUI
+ default:
+ opts := a.getOptions()
+
+ ui = tui.CreateUI(
+ a.TermApp,
+ a.Screen,
+ os.Stdout,
+ !a.Flags.NoColor,
+ a.Flags.ShowApparentSize,
+ a.Flags.ShowRelativeSize,
+ a.Flags.UseSIPrefix,
+ opts...,
+ )
+
+ if !a.Flags.NoColor {
+ tview.Styles.TitleColor = tcell.NewRGBColor(27, 161, 227)
+ } else {
+ tview.Styles.ContrastBackgroundColor = tcell.NewRGBColor(150, 150, 150)
+ }
+ tview.Styles.BorderColor = tcell.ColorDefault
+ }
+
+ return ui, nil
+}
+
+func (a *App) getOptions() []tui.Option {
+ var opts []tui.Option
+
+ if a.Flags.Style.SelectedRow.TextColor != "" {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetSelectedTextColor(tcell.GetColor(a.Flags.Style.SelectedRow.TextColor))
+ })
+ }
+ if a.Flags.Style.SelectedRow.BackgroundColor != "" {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetSelectedBackgroundColor(tcell.GetColor(a.Flags.Style.SelectedRow.BackgroundColor))
+ })
+ }
+ if a.Flags.Style.Footer.TextColor != "" {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetFooterTextColor(a.Flags.Style.Footer.TextColor)
+ })
+ }
+ if a.Flags.Style.Footer.BackgroundColor != "" {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetFooterBackgroundColor(a.Flags.Style.Footer.BackgroundColor)
+ })
+ }
+ if a.Flags.Style.Footer.NumberColor != "" {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetFooterNumberColor(a.Flags.Style.Footer.NumberColor)
+ })
+ }
+ if a.Flags.Style.Header.TextColor != "" {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetHeaderTextColor(a.Flags.Style.Header.TextColor)
+ })
+ }
+ if a.Flags.Style.Header.BackgroundColor != "" {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetHeaderBackgroundColor(a.Flags.Style.Header.BackgroundColor)
+ })
+ }
+ if a.Flags.Style.Header.Hidden {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetHeaderHidden()
+ })
+ }
+ if a.Flags.Style.ResultRow.NumberColor != "" {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetResultRowNumberColor(a.Flags.Style.ResultRow.NumberColor)
+ })
+ }
+ if a.Flags.Style.ResultRow.DirectoryColor != "" {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetResultRowDirectoryColor(a.Flags.Style.ResultRow.DirectoryColor)
+ })
+ }
+ if a.Flags.Style.ProgressModal.CurrentItemNameMaxLen > 0 {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetCurrentItemNameMaxLen(a.Flags.Style.ProgressModal.CurrentItemNameMaxLen)
+ })
+ }
+ if a.Flags.Style.UseOldSizeBar || a.Flags.NoUnicode {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.UseOldSizeBar()
+ })
+ }
+ if a.Flags.Sorting.Order != "" || a.Flags.Sorting.By != "" {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetDefaultSorting(a.Flags.Sorting.By, a.Flags.Sorting.Order)
+ })
+ }
+ if a.Flags.ChangeCwd {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetChangeCwdFn(os.Chdir)
+ })
+ }
+ if a.Flags.ShowItemCount {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetShowItemCount()
+ })
+ }
+ if a.Flags.ShowMTime {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetShowMTime()
+ })
+ }
+ if a.Flags.NoDelete {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetNoDelete()
+ })
+ }
+ if a.Flags.NoViewFile {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetNoViewFile()
+ })
+ }
+ if a.Flags.NoSpawnShell {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetNoSpawnShell()
+ })
+ }
+ if a.Flags.DeleteInBackground {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetDeleteInBackground()
+ })
+ }
+ if a.Flags.DeleteInParallel {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetDeleteInParallel()
+ })
+ }
+ if a.Flags.BrowseParentDirs {
+ opts = append(opts, func(ui *tui.UI) {
+ ui.SetBrowseParentDirs()
+ })
+ }
+ return opts
+}
+
+func (a *App) setNoCross(path string) error {
+ if a.Flags.NoCross {
+ mounts, err := a.Getter.GetMounts()
+ if err != nil {
+ return fmt.Errorf("loading mount points: %w", err)
+ }
+ paths := device.GetNestedMountpointsPaths(path, mounts)
+ log.Printf("Ignoring mount points: %s", strings.Join(paths, ", "))
+ a.Flags.IgnoreDirs = append(a.Flags.IgnoreDirs, paths...)
+ }
+ return nil
+}
+
+func (a *App) runAction(ui UI, path string) error {
+ if a.Flags.Profiling {
+ go func() {
+ http.HandleFunc("/debug/pprof/", pprof.Index)
+ http.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
+ http.HandleFunc("/debug/pprof/profile", pprof.Profile)
+ http.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
+ http.HandleFunc("/debug/pprof/trace", pprof.Trace)
+ log.Println(http.ListenAndServe("localhost:6060", nil))
+ }()
+ }
+
+ switch {
+ case a.Flags.ShowDisks:
+ if err := ui.ListDevices(a.Getter); err != nil {
+ return fmt.Errorf("loading mount points: %w", err)
+ }
+ case a.Flags.InputFile != "":
+ var input io.Reader
+ var err error
+ if a.Flags.InputFile == "-" {
+ input = os.Stdin
+ } else {
+ input, err = os.OpenFile(a.Flags.InputFile, os.O_RDONLY, 0o600)
+ if err != nil {
+ return fmt.Errorf("opening input file: %w", err)
+ }
+ }
+
+ if err := ui.ReadAnalysis(input); err != nil {
+ return fmt.Errorf("reading analysis: %w", err)
+ }
+ default:
+ if build.RootPathPrefix != "" {
+ path = build.RootPathPrefix + path
+ }
+
+ _, err := a.PathChecker(path)
+ if err != nil {
+ return err
+ }
+
+ log.Printf("Analyzing path: %s", path)
+ if err := ui.AnalyzePath(path, nil); err != nil {
+ return fmt.Errorf("scanning dir: %w", err)
+ }
+ }
+ return nil
+}
--- /dev/null
+//go:build linux
+
+package app
+
+import (
+ "os"
+ "testing"
+
+ "github.com/dundee/gdu/v5/internal/testdev"
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/device"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNoCrossWithErr(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", NoCross: true},
+ []string{"test_dir"},
+ false,
+ device.LinuxDevicesInfoGetter{MountsPath: "/xxxyyy"},
+ )
+
+ assert.Equal(t, "loading mount points: open /xxxyyy: no such file or directory", err.Error())
+ assert.Empty(t, out)
+}
+
+func TestListDevicesWithErr(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ _, err := runApp(
+ &Flags{LogFile: "/dev/null", ShowDisks: true},
+ []string{},
+ false,
+ device.LinuxDevicesInfoGetter{MountsPath: "/xxxyyy"},
+ )
+
+ assert.Equal(t, "loading mount points: open /xxxyyy: no such file or directory", err.Error())
+}
+
+func TestOutputFileError(t *testing.T) {
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", OutputFile: "/xyzxyz"},
+ []string{},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Empty(t, out)
+ assert.Contains(t, err.Error(), "permission denied")
+}
+
+func TestUseStorage(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ const storagePath = "/tmp/badger-test.badger"
+ defer func() {
+ err := os.RemoveAll(storagePath)
+ if err != nil {
+ panic(err)
+ }
+ }()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", DbPath: storagePath},
+ []string{"test_dir"},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Contains(t, out, "nested")
+ assert.Nil(t, err)
+}
+
+func TestReadFromStorage(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ storagePath := "/tmp/badger-test4.badger"
+ defer func() {
+ err := os.RemoveAll(storagePath)
+ if err != nil {
+ panic(err)
+ }
+ }()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", DbPath: storagePath},
+ []string{"test_dir"},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+ assert.Contains(t, out, "nested")
+ assert.Nil(t, err)
+
+ out, err = runApp(
+ &Flags{LogFile: "/dev/null", ReadFromStorage: true, DbPath: storagePath},
+ []string{"test_dir"},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+ assert.Contains(t, out, "nested")
+ assert.Nil(t, err)
+}
--- /dev/null
+package app
+
+import (
+ "bytes"
+ "os"
+ "regexp"
+ "runtime"
+ "strings"
+ "testing"
+
+ log "github.com/sirupsen/logrus"
+
+ "github.com/dundee/gdu/v5/internal/testapp"
+ "github.com/dundee/gdu/v5/internal/testdev"
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/device"
+ "github.com/stretchr/testify/assert"
+)
+
+func init() {
+ log.SetLevel(log.WarnLevel)
+}
+
+func TestVersion(t *testing.T) {
+ out, err := runApp(
+ &Flags{ShowVersion: true},
+ []string{},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Contains(t, out, "Version:\t development")
+ assert.Nil(t, err)
+}
+
+func TestAnalyzePath(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null"},
+ []string{"test_dir"},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Contains(t, out, "nested")
+ assert.Nil(t, err)
+}
+
+func TestAnalyzePathWithShowItemCountNonInteractive(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", ShowItemCount: true},
+ []string{"test_dir"},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Nil(t, err)
+ assert.Regexp(t, regexp.MustCompile(`(?m)\s+\d+\s+/nested$`), out)
+}
+
+func TestSequentialScanning(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", SequentialScanning: true},
+ []string{"test_dir"},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Contains(t, out, "nested")
+ assert.Nil(t, err)
+}
+
+func TestFollowSymlinks(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", FollowSymlinks: true},
+ []string{"test_dir"},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Contains(t, out, "nested")
+ assert.Nil(t, err)
+}
+
+func TestShowAnnexedSize(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", ShowAnnexedSize: true},
+ []string{"test_dir"},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Contains(t, out, "nested")
+ assert.Nil(t, err)
+}
+
+func TestAnalyzePathProfiling(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", Profiling: true},
+ []string{"test_dir"},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Contains(t, out, "nested")
+ assert.Nil(t, err)
+}
+
+func TestAnalyzePathWithIgnoring(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{
+ LogFile: "/dev/null",
+ IgnoreDirPatterns: []string{"/(abc)+"},
+ NoHidden: true,
+ },
+ []string{"test_dir"},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Contains(t, out, "nested")
+ assert.Nil(t, err)
+}
+
+func TestAnalyzePathWithIgnoringPatternError(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{
+ LogFile: "/dev/null",
+ IgnoreDirPatterns: []string{"[[["},
+ NoHidden: true,
+ },
+ []string{"test_dir"},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Equal(t, out, "")
+ assert.NotNil(t, err)
+}
+
+func TestAnalyzePathWithIgnoringFromNotExistingFile(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{
+ LogFile: "/dev/null",
+ IgnoreFromFile: "file",
+ NoHidden: true,
+ },
+ []string{"test_dir"},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Equal(t, out, "")
+ assert.NotNil(t, err)
+}
+
+func TestAnalyzePathWithGui(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null"},
+ []string{"test_dir"},
+ true,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Empty(t, out)
+ assert.Nil(t, err)
+}
+
+func TestAnalyzePathWithGuiNoColor(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", NoColor: true},
+ []string{"test_dir"},
+ true,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Empty(t, out)
+ assert.Nil(t, err)
+}
+
+func TestGuiShowMTimeAndItemCount(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", ShowItemCount: true, ShowMTime: true},
+ []string{"test_dir"},
+ true,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Empty(t, out)
+ assert.Nil(t, err)
+}
+
+func TestGuiNoDelete(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", NoDelete: true},
+ []string{"test_dir"},
+ true,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Empty(t, out)
+ assert.Nil(t, err)
+}
+
+func TestGuiNoViewFile(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", NoViewFile: true},
+ []string{"test_dir"},
+ true,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Empty(t, out)
+ assert.Nil(t, err)
+}
+
+func TestGuiNoSpawnShell(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", NoSpawnShell: true},
+ []string{"test_dir"},
+ true,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Empty(t, out)
+ assert.Nil(t, err)
+}
+
+func TestGuiDeleteInParallel(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", DeleteInParallel: true},
+ []string{"test_dir"},
+ true,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Empty(t, out)
+ assert.Nil(t, err)
+}
+
+func TestAnalyzePathWithGuiBackgroundDeletion(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", DeleteInBackground: true},
+ []string{"test_dir"},
+ true,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Empty(t, out)
+ assert.Nil(t, err)
+}
+
+func TestAnalyzePathWithDefaultSorting(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{
+ LogFile: "/dev/null",
+ Sorting: Sorting{
+ By: "name",
+ Order: "asc",
+ },
+ },
+ []string{"test_dir"},
+ true,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Empty(t, out)
+ assert.Nil(t, err)
+}
+
+func TestAnalyzePathWithStyle(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{
+ LogFile: "/dev/null",
+ Style: Style{
+ SelectedRow: ColorStyle{
+ TextColor: "black",
+ BackgroundColor: "red",
+ },
+ ProgressModal: ProgressModalOpts{
+ CurrentItemNameMaxLen: 10,
+ },
+ Footer: FooterColorStyle{
+ TextColor: "black",
+ BackgroundColor: "red",
+ NumberColor: "white",
+ },
+ Header: HeaderColorStyle{
+ TextColor: "black",
+ BackgroundColor: "red",
+ Hidden: true,
+ },
+ ResultRow: ResultRowColorStyle{
+ NumberColor: "orange",
+ DirectoryColor: "blue",
+ },
+ UseOldSizeBar: true,
+ },
+ },
+ []string{"test_dir"},
+ true,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Empty(t, out)
+ assert.Nil(t, err)
+}
+
+func TestAnalyzePathNoUnicode(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{
+ LogFile: "/dev/null",
+ NoUnicode: true,
+ },
+ []string{"test_dir"},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Contains(t, out, "nested")
+ assert.Nil(t, err)
+}
+
+func TestAnalyzePathWithExport(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ defer func() {
+ os.Remove("output.json")
+ }()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", OutputFile: "output.json"},
+ []string{"test_dir"},
+ true,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.NotEmpty(t, out)
+ assert.Nil(t, err)
+}
+
+func TestAnalyzePathWithChdir(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{
+ LogFile: "/dev/null",
+ ChangeCwd: true,
+ },
+ []string{"test_dir"},
+ true,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Empty(t, out)
+ assert.Nil(t, err)
+}
+
+func TestReadAnalysisFromFile(t *testing.T) {
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", InputFile: "../../../internal/testdata/test.json"},
+ []string{"test_dir"},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.NotEmpty(t, out)
+ assert.Contains(t, out, "main.go")
+ assert.Nil(t, err)
+}
+
+func TestReadWrongAnalysisFromFile(t *testing.T) {
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", InputFile: "../../../internal/testdata/wrong.json"},
+ []string{"test_dir"},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Empty(t, out)
+ assert.Contains(t, err.Error(), "array of maps not found")
+}
+
+func TestWrongCombinationOfPrefixes(t *testing.T) {
+ out, err := runApp(
+ &Flags{NoPrefix: true, UseSIPrefix: true},
+ []string{"test_dir"},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Empty(t, out)
+ assert.Contains(t, err.Error(), "cannot be used at once")
+}
+
+func TestReadWrongAnalysisFromNotExistingFile(t *testing.T) {
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", InputFile: "xxx.json"},
+ []string{"test_dir"},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Empty(t, out)
+ assert.Contains(t, err.Error(), "no such file or directory")
+}
+
+func TestAnalyzePathWithErr(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ buff := bytes.NewBufferString("")
+
+ app := App{
+ Flags: &Flags{LogFile: "/dev/null"},
+ Args: []string{"xxx"},
+ Istty: false,
+ Writer: buff,
+ TermApp: testapp.CreateMockedApp(false),
+ Getter: testdev.DevicesInfoGetterMock{},
+ PathChecker: os.Stat,
+ }
+ err := app.Run()
+
+ assert.Equal(t, "", strings.TrimSpace(buff.String()))
+ assert.Contains(t, err.Error(), "no such file or directory")
+}
+
+func TestNoCross(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", NoCross: true},
+ []string{"test_dir"},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Contains(t, out, "nested")
+ assert.Nil(t, err)
+}
+
+func TestListDevices(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", ShowDisks: true},
+ []string{},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Contains(t, out, "Device")
+ assert.Nil(t, err)
+}
+
+func TestListDevicesToFile(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ defer func() {
+ os.Remove("output.json")
+ }()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", ShowDisks: true, OutputFile: "output.json"},
+ []string{},
+ false,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Equal(t, "", out)
+ assert.Contains(t, err.Error(), "not supported")
+}
+
+func TestListDevicesWithGui(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", ShowDisks: true},
+ []string{},
+ true,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Nil(t, err)
+ assert.Empty(t, out)
+}
+
+func TestMaxCores(t *testing.T) {
+ _, err := runApp(
+ &Flags{LogFile: "/dev/null", MaxCores: 1},
+ []string{},
+ true,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.Equal(t, 1, runtime.GOMAXPROCS(0))
+ assert.Nil(t, err)
+}
+
+func TestMaxCoresHighEdge(t *testing.T) {
+ if runtime.NumCPU() < 2 {
+ t.Skip("Skipping on a single core CPU")
+ }
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", MaxCores: runtime.NumCPU() + 1},
+ []string{},
+ true,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.NotEqual(t, runtime.NumCPU(), runtime.GOMAXPROCS(0))
+ assert.Empty(t, out)
+ assert.Nil(t, err)
+}
+
+func TestMaxCoresLowEdge(t *testing.T) {
+ if runtime.NumCPU() < 2 {
+ t.Skip("Skipping on a single core CPU")
+ }
+ out, err := runApp(
+ &Flags{LogFile: "/dev/null", MaxCores: -100},
+ []string{},
+ true,
+ testdev.DevicesInfoGetterMock{},
+ )
+
+ assert.NotEqual(t, runtime.NumCPU(), runtime.GOMAXPROCS(0))
+ assert.Empty(t, out)
+ assert.Nil(t, err)
+}
+
+// nolint: unparam // Why: it's used in linux tests
+func runApp(flags *Flags, args []string, istty bool, getter device.DevicesInfoGetter) (output string, err error) {
+ buff := bytes.NewBufferString("")
+
+ app := App{
+ Flags: flags,
+ Args: args,
+ Istty: istty,
+ Writer: buff,
+ TermApp: testapp.CreateMockedApp(false),
+ Getter: getter,
+ PathChecker: testdir.MockedPathChecker,
+ }
+ err = app.Run()
+
+ return strings.TrimSpace(buff.String()), err
+}
--- /dev/null
+package main
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+
+ "github.com/gdamore/tcell/v2"
+ "github.com/mattn/go-isatty"
+ "github.com/rivo/tview"
+ log "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+ "gopkg.in/yaml.v3"
+
+ "github.com/dundee/gdu/v5/cmd/gdu/app"
+ "github.com/dundee/gdu/v5/pkg/device"
+)
+
+var (
+ af *app.Flags
+ configErr error
+)
+
+var rootCmd = &cobra.Command{
+ Use: "gdu [directory_to_scan]",
+ Short: "Pretty fast disk usage analyzer written in Go",
+ Long: `Pretty fast disk usage analyzer written in Go.
+
+Gdu is intended primarily for SSD disks where it can fully utilize parallel processing.
+However HDDs work as well, but the performance gain is not so huge.
+`,
+ Args: cobra.MaximumNArgs(1),
+ SilenceUsage: true,
+ RunE: runE,
+}
+
+// nolint:funlen // a lot of flags to initialize
+func init() {
+ af = &app.Flags{}
+ flags := rootCmd.Flags()
+ flags.StringVar(&af.CfgFile, "config-file", "", "Read config from file (default is $HOME/.gdu.yaml)")
+ flags.StringVarP(&af.LogFile, "log-file", "l", "/dev/null", "Path to a logfile")
+ flags.StringVarP(&af.OutputFile, "output-file", "o", "", "Export all info into file as JSON")
+ flags.StringVarP(&af.InputFile, "input-file", "f", "", "Import analysis from JSON file")
+ flags.IntVarP(&af.MaxCores, "max-cores", "m", runtime.NumCPU(), fmt.Sprintf("Set max cores that Gdu will use. %d cores available", runtime.NumCPU()))
+ flags.BoolVar(&af.SequentialScanning, "sequential", false, "Use sequential scanning (intended for rotating HDDs)")
+ flags.BoolVarP(&af.ShowVersion, "version", "v", false, "Print version")
+
+ flags.StringSliceVarP(&af.TypeFilter, "type", "T", []string{}, "File types to include (e.g., --type yaml,json)")
+ flags.StringSliceVarP(&af.ExcludeTypeFilter, "exclude-type", "E", []string{}, "File types to exclude (e.g., --exclude-type yaml,json)")
+ flags.StringSliceVarP(&af.IgnoreDirs, "ignore-dirs", "i", []string{"/proc", "/dev", "/sys", "/run"},
+ "Paths to ignore (separated by comma). Can be absolute or relative to current directory")
+ flags.StringSliceVarP(&af.IgnoreDirPatterns, "ignore-dirs-pattern", "I", []string{},
+ "Path patterns to ignore (separated by comma)")
+ flags.StringVarP(&af.IgnoreFromFile, "ignore-from", "X", "",
+ "Read path patterns to ignore from file")
+ flags.BoolVarP(&af.NoHidden, "no-hidden", "H", false, "Ignore hidden directories (beginning with dot)")
+ flags.BoolVarP(
+ &af.FollowSymlinks, "follow-symlinks", "L", false,
+ "Follow symlinks for files, i.e. show the size of the file to which symlink points to (symlinks to directories are not followed)",
+ )
+ flags.BoolVarP(
+ &af.ShowAnnexedSize, "show-annexed-size", "A", false,
+ "Use apparent size of git-annex'ed files in case files are not present locally (real usage is zero)",
+ )
+ flags.BoolVarP(&af.NoCross, "no-cross", "x", false, "Do not cross filesystem boundaries")
+ flags.BoolVar(&af.Profiling, "enable-profiling", false, "Enable collection of profiling data and provide it on http://localhost:6060/debug/pprof/")
+
+ flags.StringVarP(&af.DbPath, "db", "D", "", "Store analysis in database (*.sqlite for SQLite, *.badger for BadgerDB)")
+ flags.BoolVarP(&af.ReadFromStorage, "read-from-storage", "r", false, "Use existing database instead of re-scanning")
+ flags.BoolVar(&af.ArchiveBrowsing, "archive-browsing", false, "Enable browsing of zip/jar archives")
+ flags.BoolVar(&af.CollapsePath, "collapse-path", false, "Collapse single-child directory chains")
+
+ flags.BoolVarP(&af.ShowDisks, "show-disks", "d", false, "Show all mounted disks")
+ flags.BoolVarP(&af.ShowApparentSize, "show-apparent-size", "a", false, "Show apparent size")
+ flags.BoolVarP(&af.ShowRelativeSize, "show-relative-size", "B", false, "Show relative size")
+ flags.BoolVarP(&af.NoColor, "no-color", "c", false, "Do not use colorized output")
+ flags.BoolVarP(&af.ShowItemCount, "show-item-count", "C", false, "Show number of items in directory")
+ flags.BoolVarP(&af.ShowMTime, "show-mtime", "M", false, "Show latest mtime of items in directory")
+ flags.BoolVarP(&af.NonInteractive, "non-interactive", "n", false, "Do not run in interactive mode")
+ flags.BoolVarP(&af.NoProgress, "no-progress", "p", false, "Do not show progress in non-interactive mode")
+ flags.BoolVarP(&af.NoUnicode, "no-unicode", "u", false, "Do not use Unicode symbols (for size bar)")
+ flags.BoolVarP(&af.Summarize, "summarize", "s", false, "Show only a total in non-interactive mode")
+ flags.IntVarP(&af.Top, "top", "t", 0, "Show only top X largest files in non-interactive mode")
+ flags.IntVar(&af.Depth, "depth", 0, "Show directory structure up to specified depth in non-interactive mode (0 means the flag is ignored)")
+ flags.BoolVar(&af.UseSIPrefix, "si", false, "Show sizes with decimal SI prefixes (kB, MB, GB) instead of binary prefixes (KiB, MiB, GiB)")
+ flags.BoolVar(&af.NoPrefix, "no-prefix", false, "Show sizes as raw numbers without any prefixes (SI or binary) in non-interactive mode")
+ flags.BoolVarP(&af.ShowInKiB, "show-in-kib", "k", false, "Show sizes in KiB (or kB with --si) in non-interactive mode")
+ flags.BoolVar(&af.ReverseSort, "reverse-sort", false, "Reverse sorting order (smallest to largest) in non-interactive mode")
+ flags.BoolVar(&af.Mouse, "mouse", false, "Use mouse")
+ flags.BoolVar(&af.NoDelete, "no-delete", false, "Do not allow deletions")
+ flags.BoolVar(&af.NoViewFile, "no-view-file", false, "Do not allow viewing file contents")
+ flags.BoolVar(&af.NoSpawnShell, "no-spawn-shell", false, "Do not allow spawning shell")
+ flags.BoolVar(&af.WriteConfig, "write-config", false, "Write current configuration to file (default is $HOME/.gdu.yaml)")
+ flags.StringVar(
+ &af.Since, "since", "",
+ "Include files with mtime >= WHEN. WHEN accepts RFC3339 timestamp (e.g., 2025-08-11T01:00:00-07:00) "+
+ "or date only YYYY-MM-DD (calendar-day compare; includes the whole day)",
+ )
+ flags.StringVar(&af.Until, "until", "", "Include files with mtime <= WHEN. WHEN accepts RFC3339 timestamp or date only YYYY-MM-DD")
+ flags.StringVar(&af.MaxAge, "max-age", "", "Include files with mtime no older than DURATION (e.g., 7d, 2h30m, 1y2mo)")
+ flags.StringVar(&af.MinAge, "min-age", "", "Include files with mtime at least DURATION old (e.g., 30d, 1w)")
+
+ initConfig()
+ setDefaults()
+}
+
+func initConfig() {
+ setConfigFilePath()
+ data, err := os.ReadFile(af.CfgFile)
+ if err != nil {
+ configErr = err
+ return // config file does not exist, return
+ }
+
+ configErr = yaml.Unmarshal(data, &af)
+}
+
+func setDefaults() {
+ if af.Style.Footer.BackgroundColor == "" {
+ af.Style.Footer.BackgroundColor = "#2479D0"
+ }
+ if af.Style.Footer.TextColor == "" {
+ af.Style.Footer.TextColor = "#000000"
+ }
+ if af.Style.Footer.NumberColor == "" {
+ af.Style.Footer.NumberColor = "#FFFFFF"
+ }
+ if af.Style.Header.BackgroundColor == "" {
+ af.Style.Header.BackgroundColor = "#2479D0"
+ }
+ if af.Style.Header.TextColor == "" {
+ af.Style.Header.TextColor = "#000000"
+ }
+ if af.Style.ResultRow.NumberColor == "" {
+ af.Style.ResultRow.NumberColor = "#e67100"
+ }
+ if af.Style.ResultRow.DirectoryColor == "" {
+ af.Style.ResultRow.DirectoryColor = "#3498db"
+ }
+}
+
+func setConfigFilePath() {
+ command := strings.Join(os.Args, " ")
+ if strings.Contains(command, "--config-file") {
+ re := regexp.MustCompile("--config-file[= ]([^ ]+)")
+ parts := re.FindStringSubmatch(command)
+
+ if len(parts) > 1 {
+ af.CfgFile = parts[1]
+ return
+ }
+ }
+ setDefaultConfigFilePath()
+}
+
+func setDefaultConfigFilePath() {
+ home, err := os.UserHomeDir()
+ if err != nil {
+ configErr = err
+ return
+ }
+
+ path := filepath.Join(home, ".config", "gdu", "gdu.yaml")
+ if _, err := os.Stat(path); err == nil {
+ af.CfgFile = path
+ return
+ }
+
+ af.CfgFile = filepath.Join(home, ".gdu.yaml")
+}
+
+func runE(command *cobra.Command, args []string) error {
+ var (
+ termApp *tview.Application
+ screen tcell.Screen
+ err error
+ )
+
+ if af.WriteConfig {
+ data, err := yaml.Marshal(af)
+ if err != nil {
+ return fmt.Errorf("error marshaling config file: %w", err)
+ }
+ if af.CfgFile == "" {
+ setDefaultConfigFilePath()
+ }
+ err = os.WriteFile(af.CfgFile, data, 0o600)
+ if err != nil {
+ return fmt.Errorf("error writing config file %s: %w", af.CfgFile, err)
+ }
+ }
+
+ if runtime.GOOS == "windows" && af.LogFile == "/dev/null" {
+ af.LogFile = "nul"
+ }
+
+ var f *os.File
+ if af.LogFile == "-" {
+ f = os.Stdout
+ } else {
+ f, err = os.OpenFile(af.LogFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
+ if err != nil {
+ return fmt.Errorf("opening log file: %w", err)
+ }
+ defer func() {
+ cerr := f.Close()
+ if cerr != nil {
+ panic(cerr)
+ }
+ }()
+ }
+ log.SetOutput(f)
+
+ if configErr != nil {
+ log.Printf("Error reading config file: %s", configErr.Error())
+ }
+
+ istty := isatty.IsTerminal(os.Stdout.Fd())
+
+ // we are not able to analyze disk usage on Windows and Plan9
+ if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
+ af.ShowApparentSize = true
+ }
+
+ if !af.ShouldRunInNonInteractiveMode(istty) {
+ screen, err = tcell.NewScreen()
+ if err != nil {
+ return fmt.Errorf("error creating screen: %w", err)
+ }
+ defer screen.Clear()
+ defer screen.Fini()
+
+ termApp = tview.NewApplication()
+ termApp.SetScreen(screen)
+
+ if af.Mouse {
+ termApp.EnableMouse(true)
+ }
+ }
+
+ a := app.App{
+ Flags: af,
+ Args: args,
+ Istty: istty,
+ Writer: os.Stdout,
+ TermApp: termApp,
+ Screen: screen,
+ Getter: device.Getter,
+ PathChecker: os.Stat,
+ }
+ return a.Run()
+}
+
+func main() {
+ if err := rootCmd.Execute(); err != nil {
+ os.Exit(1)
+ }
+}
--- /dev/null
+package main
+
+import "testing"
+
+func TestNoViewFileFlagRegistered(t *testing.T) {
+ flag := rootCmd.Flags().Lookup("no-view-file")
+ if flag == nil {
+ t.Fatal("expected no-view-file flag to be registered")
+ }
+}
+
+func TestNoViewFileFlagCanBeSet(t *testing.T) {
+ t.Cleanup(func() {
+ _ = rootCmd.Flags().Set("no-view-file", "false")
+ })
+
+ err := rootCmd.Flags().Set("no-view-file", "true")
+ if err != nil {
+ t.Fatalf("expected setting no-view-file flag to succeed: %v", err)
+ }
+
+ if !af.NoViewFile {
+ t.Fatal("expected NoViewFile to be true after setting flag")
+ }
+}
--- /dev/null
+coverage:
+ status:
+ project:
+ default:
+ target: auto
+ threshold: 2%
+ informational: true
+ patch:
+ default:
+ informational: true
\ No newline at end of file
--- /dev/null
+# YAML file configuration options
+
+Gdu provides an additional set of configuration options to the usual command line options.
+
+You can get the full list of all possible options by running:
+
+```
+gdu --write-config
+```
+
+This will create file `$HOME/.gdu.yaml` with all the options set to default values.
+
+Let's go through them one by one:
+
+#### `log-file`
+
+Path to a logfile (default "/dev/null")
+
+#### `input-file`
+
+Import analysis from JSON file
+
+#### `output-file`
+
+Export all info into file as JSON
+
+#### `ignore-dirs`
+
+Paths to ignore (separated by comma). Can be absolute (like `/proc`) or relative to the current working directory (like `node_modules`). Default values are [/proc,/dev,/sys,/run].
+
+#### `ignore-dir-patterns`
+
+Path patterns to ignore (separated by comma). Patterns can be absolute or relative to the current working directory.
+
+#### `ignore-from-file`
+
+Read path patterns to ignore from file. Patterns can be absolute or relative to the current working directory.
+
+#### `max-cores`
+
+Set max cores that Gdu will use.
+
+#### `sequential-scanning`
+
+Use sequential scanning (intended for rotating HDDs)
+
+#### `show-apparent-size`
+
+Show apparent size
+
+#### `show-relative-size`
+
+Show relative size
+
+#### `show-item-count`
+
+Show number of items in directory
+
+#### `no-color`
+
+Do not use colorized output
+
+#### `mouse`
+
+Use mouse
+
+#### `non-interactive`
+
+Do not run in interactive mode
+
+#### `no-progress`
+
+Do not show progress in non-interactive mode
+
+#### `no-cross`
+
+Do not cross filesystem boundaries
+
+#### `no-hidden`
+
+Ignore hidden directories (beginning with dot)
+
+#### `no-delete`
+
+Do not allow deletions
+
+#### `no-view-file`
+
+Do not allow viewing file contents
+
+#### `follow-symlinks`
+
+Follow symlinks for files, i.e. show the size of the file to which symlink points to (symlinks to directories are not followed)
+
+#### `profiling`
+
+Enable collection of profiling data and provide it on http://localhost:6060/debug/pprof/
+
+#### `read-from-storage`
+
+Read analysis data from persistent key-value storage
+
+#### `summarize`
+
+Show only a total in non-interactive mode
+
+#### `use-si-prefix`
+
+Show sizes with decimal SI prefixes (kB, MB, GB) instead of binary prefixes (KiB, MiB, GiB)
+
+#### `no-prefix`
+
+Show sizes as raw numbers without any prefixes (SI or binary) in non-interactive mode
+
+#### `reverse-sort`
+
+Reverse sorting order (smallest to largest) in non-interactive mode
+
+#### `change-cwd`
+
+Set CWD variable when browsing directories
+
+#### `delete-in-background`
+
+Delete items in the background, not blocking the UI from work
+
+#### `delete-in-parallel`
+
+Delete items in parallel, which might increase the speed of deletion
+
+#### `browse-parent-dirs`
+
+Allow navigating above the launch directory by pressing the left arrow key. When enabled, pressing left at the top-level directory will rescan and open its parent directory. Disabled by default.
+
+
+#### `style.selected-row.text-color`
+
+Color of text for the selected row
+
+#### `style.selected-row.background-color`
+
+Background color for the selected row
+
+#### `style.progress-modal.current-item-path-max-len`
+
+Maximum length of file path for the current item in progress bar.
+When the length is reached, the path is shortened with "/.../".
+
+#### `style.use-old-size-bar`
+
+Show size bar without Unicode symbols.
+
+#### `style.footer.text-color`
+
+Color of text for footer bar
+
+#### `style.footer.background-color`
+
+Background color for footer bar
+
+#### `style.footer.number-color`
+
+Color of numbers displayed in the footer
+
+#### `style.header.text-color`
+
+Color of text for header bar
+
+#### `style.header.background-color`
+
+Background color for header bar
+
+#### `style.header.hidden`
+
+Hide the header bar
+
+#### `style.result-row.number-color`
+
+Color of numbers in result rows
+
+#### `style.result-row.directory-color`
+
+Color of directory names in result rows
+
+#### `sorting.by`
+
+Sort items. Possible values:
+* name - name of the item
+* size - usage or apparent size
+* itemCount - number of items in the folder tree
+* mtime - modification time
+
+#### `sorting.order`
+
+Set sorting order. Possible values:
+* asc - ascending order
+* desc - descending order
--- /dev/null
+# Release process
+
+1. update usage in README.md and gdu.1.md
+1. `make show-man`
+1. `make man`
+1. commit the changes
+1. tag new version with `-sa`
+1. `make`
+1. `git push --tags`
+1. `git push`
+1. `make release`
+1. update `gdu.spec`
+1. Release snapcraft, AUR, ...
--- /dev/null
+.\" Automatically generated by Pandoc 3.4
+.\"
+.TH "gdu" "1" "2026\-03\-09" ""
+.SH NAME
+gdu \- Pretty fast disk usage analyzer written in Go
+.SH SYNOPSIS
+\f[B]gdu [flags] [directory_to_scan]\f[R]
+.SH DESCRIPTION
+Pretty fast disk usage analyzer written in Go.
+.PP
+Gdu is intended primarily for SSD disks where it can fully utilize
+parallel processing.
+However HDDs work as well, but the performance gain is not so huge.
+.SH OPTIONS
+\f[B]\-h\f[R], \f[B]\-\-help\f[R][=false] help for gdu
+.PP
+\f[B]\-i\f[R], \f[B]\-\-ignore\-dirs\f[R]=[/proc,/dev,/sys,/run] Paths
+to ignore (separated by comma).
+Supports both absolute and relative paths.
+.PP
+\f[B]\-I\f[R], \f[B]\-\-ignore\-dirs\-pattern\f[R] Path patterns to
+ignore (separated by comma).
+Supports both absolute and relative path patterns.
+.PP
+\f[B]\-X\f[R], \f[B]\-\-ignore\-from\f[R] Read path patterns to ignore
+from file.
+Supports both absolute and relative path patterns.
+.PP
+\f[B]\-T\f[R], \f[B]\-\-type\f[R] File types to include (e.g., \[en]type
+yaml,json)
+.PP
+\f[B]\-E\f[R], \f[B]\-\-exclude\-type\f[R] File types to exclude (e.g.,
+\[en]exclude\-type yaml,json)
+.PP
+\f[B]\-\-max\-age\f[R] Include files with mtime no older than DURATION
+(e.g., 7d, 2h30m, 1y2mo)
+.PP
+\f[B]\-\-min\-age\f[R] Include files with mtime at least DURATION old
+(e.g., 30d, 1w)
+.PP
+\f[B]\-\-since\f[R] Include files with mtime >= WHEN.
+WHEN accepts RFC3339 timestamp (e.g., 2025\-08\-11T01:00:00\-07:00) or
+date only YYYY\-MM\-DD (calendar\-day compare; includes the whole day)
+.PP
+\f[B]\-\-until\f[R] Include files with mtime <= WHEN.
+WHEN accepts RFC3339 timestamp or date only YYYY\-MM\-DD
+.PP
+\f[B]\-l\f[R], \f[B]\-\-log\-file\f[R]=\[dq]/dev/null\[dq] Path to a
+logfile
+.PP
+\f[B]\-m\f[R], \f[B]\-\-max\-cores\f[R] Set max cores that Gdu will use.
+.PP
+\f[B]\-c\f[R], \f[B]\-\-no\-color\f[R][=false] Do not use colorized
+output
+.PP
+\f[B]\-x\f[R], \f[B]\-\-no\-cross\f[R][=false] Do not cross filesystem
+boundaries
+.PP
+\f[B]\-H\f[R], \f[B]\-\-no\-hidden\f[R][=false] Ignore hidden
+directories (beginning with dot)
+.PP
+\f[B]\-L\f[R], \f[B]\-\-follow\-symlinks\f[R][=false] Follow symlinks
+for files, i.e.\ show the size of the file to which symlink points to
+(symlinks to directories are not followed)
+.PP
+\f[B]\-n\f[R], \f[B]\-\-non\-interactive\f[R][=false] Do not run in
+interactive mode
+.PP
+\f[B]\-p\f[R], \f[B]\-\-no\-progress\f[R][=false] Do not show progress
+in non\-interactive mode
+.PP
+\f[B]\-u\f[R], \f[B]\-\-no\-unicode\f[R][=false] Do not use Unicode
+symbols (for size bar)
+.PP
+\f[B]\-s\f[R], \f[B]\-\-summarize\f[R][=false] Show only a total in
+non\-interactive mode
+.PP
+\f[B]\-t\f[R], \f[B]\-\-top\f[R][=0] Show only top X largest files in
+non\-interactive mode
+.PP
+\f[B]\-d\f[R], \f[B]\-\-show\-disks\f[R][=false] Show all mounted disks
+.PP
+\f[B]\-a\f[R], \f[B]\-\-show\-apparent\-size\f[R][=false] Show apparent
+size
+.PP
+\f[B]\-C\f[R], \f[B]\-\-show\-item\-count\f[R][=false] Show number of
+items in directory
+.PP
+\f[B]\-k\f[R], \f[B]\-\-show\-in\-kib\f[R][=false] Show sizes in KiB (or
+kB with \[en]si) in non\-interactive mode
+.PP
+\f[B]\-M\f[R], \f[B]\-\-show\-mtime\f[R][=false] Show latest mtime of
+items in directory
+.PP
+\f[B]\-\-archive\-browsing\f[R][=false] Enable browsing of zip/jar
+archives
+.PP
+\f[B]\-\-depth\f[R][=0] Show directory structure up to specified depth
+in non\-interactive mode (0 means the flag is ignored)
+.PP
+\f[B]\-\-collapse\-path\f[R][=false] Collapse single\-child directory
+chains
+.PP
+\f[B]\-\-mouse\f[R][=false] Use mouse
+.PP
+\f[B]\-\-si\f[R][=false] Show sizes with decimal SI prefixes (kB, MB,
+GB) instead of binary prefixes (KiB, MiB, GiB)
+.PP
+\f[B]\-\-no\-prefix\f[R][=false] Show sizes as raw numbers without any
+prefixes (SI or binary) in non\-interactive mode
+.PP
+\f[B]\-\-no\-spawn\-shell\f[R][=false] Do not allow spawning shell
+.PP
+\f[B]\-\-no\-delete\f[R][=false] Do not allow deletions
+.PP
+\f[B]\-\-no\-view\-file\f[R][=false] Do not allow viewing file contents
+.PP
+\f[B]\-f\f[R], \f[B]\-\-input\-file\f[R] Import analysis from JSON file.
+If the file is \[dq]\-\[dq], read from standard input.
+.PP
+\f[B]\-o\f[R], \f[B]\-\-output\-file\f[R] Export all info into file as
+JSON.
+If the file is \[dq]\-\[dq], write to standard output.
+.PP
+\f[B]\-\-config\-file\f[R]=\[dq]$HOME/.gdu.yaml\[dq] Read config from
+file
+.PP
+\f[B]\-\-write\-config\f[R][=false] Write current configuration to file
+(default is $HOME/.gdu.yaml)
+.PP
+\f[B]\-\-enable\-profiling\f[R][=false] Enable collection of profiling
+data and provide it on http://localhost:6060/debug/pprof/
+.PP
+\f[B]\-D\f[R], \f[B]\-\-db\f[R] Store analysis in database (\f[I].sqlite
+for SQLite, \f[R].badger for BadgerDB)
+.PP
+\f[B]\-r\f[R], \f[B]\-\-read\-from\-storage\f[R][=false] Use existing
+database instead of re\-scanning
+.PP
+\f[B]\-v\f[R], \f[B]\-\-version\f[R][=false] Print version
+.SH FILE FLAGS
+Files and directories may be prefixed by a one\-character flag with
+following meaning:
+.TP
+\f[B]!\f[R]
+An error occurred while reading this directory.
+.TP
+\f[B].\f[R]
+An error occurred while reading a subdirectory, size may be not correct.
+.TP
+\f[B]\[at]\f[R]
+File is symlink or socket.
+.TP
+\f[B]H\f[R]
+Same file was already counted (hard link).
+.TP
+\f[B]e\f[R]
+Directory is empty.
--- /dev/null
+---
+date: {{date}}
+section: 1
+title: gdu
+---
+
+# NAME
+
+gdu - Pretty fast disk usage analyzer written in Go
+
+# SYNOPSIS
+
+**gdu \[flags\] \[directory_to_scan\]**
+
+# DESCRIPTION
+
+Pretty fast disk usage analyzer written in Go.
+
+Gdu is intended primarily for SSD disks where it can fully utilize
+parallel processing. However HDDs work as well, but the performance gain
+is not so huge.
+
+# OPTIONS
+
+**-h**, **\--help**\[=false\] help for gdu
+
+**-i**, **\--ignore-dirs**=\[/proc,/dev,/sys,/run\]
+ Paths to ignore (separated by comma).
+ Supports both absolute and relative paths.
+
+**-I**, **\--ignore-dirs-pattern**
+ Path patterns to ignore (separated by comma).
+ Supports both absolute and relative path patterns.
+
+**-X**, **\--ignore-from**
+ Read path patterns to ignore from file.
+ Supports both absolute and relative path patterns.
+
+**-T**, **\--type** File types to include (e.g., --type yaml,json)
+
+**-E**, **\--exclude-type** File types to exclude (e.g., --exclude-type yaml,json)
+
+**\--max-age** Include files with mtime no older than DURATION (e.g., 7d, 2h30m, 1y2mo)
+
+**\--min-age** Include files with mtime at least DURATION old (e.g., 30d, 1w)
+
+**\--since** Include files with mtime >= WHEN. WHEN accepts RFC3339 timestamp (e.g., 2025-08-11T01:00:00-07:00) or date only YYYY-MM-DD (calendar-day compare; includes the whole day)
+
+**\--until** Include files with mtime <= WHEN. WHEN accepts RFC3339 timestamp or date only YYYY-MM-DD
+
+**-l**, **\--log-file**=\"/dev/null\" Path to a logfile
+
+**-m**, **\--max-cores** Set max cores that Gdu will use.
+
+**-c**, **\--no-color**\[=false\] Do not use colorized output
+
+**-x**, **\--no-cross**\[=false\] Do not cross filesystem boundaries
+
+**-H**, **\--no-hidden**\[=false\] Ignore hidden directories (beginning with dot)
+
+**-L**, **\--follow-symlinks**\[=false\] Follow symlinks for files, i.e. show the
+size of the file to which symlink points to (symlinks to directories are not followed)
+
+**-n**, **\--non-interactive**\[=false\] Do not run in interactive mode
+
+**-p**, **\--no-progress**\[=false\] Do not show progress in
+non-interactive mode
+
+**-u**, **\--no-unicode**\[=false\] Do not use Unicode symbols (for size bar)
+
+**-s**, **\--summarize**\[=false\] Show only a total in non-interactive mode
+
+**-t**, **\--top**\[=0\] Show only top X largest files in non-interactive mode
+
+**-d**, **\--show-disks**\[=false\] Show all mounted disks
+
+**-a**, **\--show-apparent-size**\[=false\] Show apparent size
+
+**-C**, **\--show-item-count**\[=false\] Show number of items in directory
+
+**-k**, **\--show-in-kib**\[=false\] Show sizes in KiB (or kB with --si) in non-interactive mode
+
+**-M**, **\--show-mtime**\[=false\] Show latest mtime of items in directory
+
+**\--archive-browsing**\[=false\] Enable browsing of zip/jar archives
+
+**\--depth**\[=0\] Show directory structure up to specified depth in non-interactive mode (0 means the flag is ignored)
+
+**\--collapse-path**\[=false\] Collapse single-child directory chains
+
+**\--mouse**\[=false\] Use mouse
+
+**\--si**\[=false\] Show sizes with decimal SI prefixes (kB, MB, GB) instead of binary prefixes (KiB, MiB, GiB)
+
+**\--no-prefix**\[=false\] Show sizes as raw numbers without any prefixes (SI or binary) in non-interactive mode
+
+**\--no-spawn-shell**\[=false\] Do not allow spawning shell
+
+**\--no-delete**\[=false\] Do not allow deletions
+
+**\--no-view-file**\[=false\] Do not allow viewing file contents
+
+**-f**, **\--input-file** Import analysis from JSON file. If the file is \"-\", read from standard input.
+
+**-o**, **\--output-file** Export all info into file as JSON. If the file is \"-\", write to standard output.
+
+**\--config-file**=\"$HOME/.gdu.yaml\" Read config from file
+
+**\--write-config**\[=false\] Write current configuration to file (default is $HOME/.gdu.yaml)
+
+**\--enable-profiling**\[=false\] Enable collection of profiling data and provide it on http://localhost:6060/debug/pprof/
+
+**-D**, **\--db** Store analysis in database (*.sqlite for SQLite, *.badger for BadgerDB)
+
+**-r**, **\--read-from-storage**\[=false\] Use existing database instead of re-scanning
+
+**-v**, **\--version**\[=false\] Print version
+
+# FILE FLAGS
+
+Files and directories may be prefixed by a one-character
+flag with following meaning:
+
+**!**
+
+: An error occurred while reading this directory.
+
+**.**
+
+: An error occurred while reading a subdirectory, size may be not correct.
+
+**\@**
+
+: File is symlink or socket.
+
+**H**
+
+: Same file was already counted (hard link).
+
+**e**
+
+: Directory is empty.
--- /dev/null
+Name: gdu
+Version: 5.32.0
+Release: 1
+Summary: Pretty fast disk usage analyzer written in Go
+
+License: MIT
+URL: https://github.com/dundee/gdu
+
+Source0: https://github.com/dundee/gdu/archive/refs/tags/v%{version}.tar.gz
+
+BuildRequires: golang
+BuildRequires: systemd-rpm-macros
+BuildRequires: git
+
+Provides: %{name} = %{version}
+
+%description
+Pretty fast disk usage analyzer written in Go.
+
+%global debug_package %{nil}
+
+%prep
+%autosetup -n %{name}-%{version}
+
+%build
+export GOINSECURE=go.opencensus.io
+GO111MODULE=on CGO_ENABLED=0 go build \
+-trimpath \
+-buildmode=pie \
+-mod=readonly \
+-modcacherw \
+-ldflags \
+"-s -w \
+-X 'github.com/dundee/gdu/v5/build.Version=v%{version}' \
+-X 'github.com/dundee/gdu/v5/build.User=$(id -u -n)' \
+-X 'github.com/dundee/gdu/v5/build.Time=$(LC_ALL=en_US.UTF-8 date)'" \
+-o %{name} github.com/dundee/gdu/v5/cmd/gdu
+
+%install
+rm -rf $RPM_BUILD_ROOT
+install -Dpm 0755 %{name} %{buildroot}%{_bindir}/%{name}
+install -Dpm 0755 %{name}.1 $RPM_BUILD_ROOT%{_mandir}/man1/gdu.1
+
+%check
+
+%post
+
+%preun
+
+%files
+%{_bindir}/gdu
+%{_mandir}/man1/gdu.1.gz
+
+%changelog
+* Sat Nov 22 2025 Daniel Milde - 5.32.0-1
+- feat: Add --no-spawn-shell flag to disable shell access by @ShivamB25 in #440
+- feat: Add --reverse-sort flag for non-interactive mode by @ShivamB25 in #436
+- feat: switch mouse flag name, mimic default ncdu behavior by @shantanugadgil in #420
+- feat: bump version of tcell, drop support for Golang 1.22 by @dundee in #432
+- fix: make 'no' the default button in delete/empty confirmation dialogs by @ShivamB25 in #437
+- fix: reorder --mouse option by @shantanugadgil in #433
+- fix: ulikunitz/xz package update to fix vulnerability by @jullianow in #446
+- ci: use Golang 1.24.4 by @shantanugadgil in #421
+- refactor: struct align by @dundee in #442
+- docs: fix link to configuration.md by @joliss in #392
+* Fri Jun 6 2025 Daniel Milde - 5.31.0-1
+- feat: relative path ignore support by @s0up4200 in #398
+- feat: Add support showing size of absent git-annex'ed files by @stv0g in #404
+- fix: ctrl_z corruption #253 by @yurenchen000 in #406
+- fix: item count for --show-item-count by @dundee in #416
+- fix: automatically run non-interactive when related flag set by @dundee in #418
+* Tue Feb 4 2025 - Danie de Jager - 5.30.1-2
+- fix: set "GOINSECURE=go.opencensus.io"
+* Mon Dec 30 2024 Daniel Milde - 5.30.1-1
+- fix: set default colors when config file does not exist
+* Mon Dec 30 2024 Daniel Milde - 5.30.0-1
+- feat: show top largest files using -t or --top option in #391
+- feat: introduce more style options in #396
+* Mon Jun 17 2024 Daniel Milde - 5.29.0-1
+- feat: support for reading gzip, bzip2 and xz files by @dundee in #363
+- feat: add --show-mtime (-M) option by @dundee in #350
+- feat: add option --no-unicode to disable unicode symbols by @dundee in #362
+- fix: division by zero error in formatFileRow by @xroberx in #359
+* Sun Apr 21 2024 Danie de Jager - 5.28.0-1
+- feat: delete/empty items in background by @dundee in #336
+- feat: add --show-item-count (-C) option by @ramgp in #332
+- feat: add --no-delete option by @ramgp in #333
+- feat: ignore item by pressing I by @dundee in #345
+- feat: delete directory items in parallel by @dundee in #340
+- feat: add --sequential option for sequential scanning by @dundee in #322
+* Sun Feb 18 2024 Danie de Jager - 5.27.0-1
+- feat: export in interactive mode by @kadogo in #298
+- feat: handle vim-style navigation in confirmation by @samihda in #283
+- fix: panic with Interface Conversion Nil Error by @ShivamB25 in #274
+- fix: Enter key properly working when reading analysis from file by @dundee in #312
+- fix: check if type matches for selected device by @dundee in #318
+- ci: package gdu in docker container by @rare-magma in #313
+- ci: add values for building gdu with tito by @daniejstriata in #288
+- ci: change Winget Releaser job to ubuntu-latest by @sitiom in #271
+* Tue Feb 13 2024 Danie de Jager - 5.26.0-1
+- feat: use key-value store for analysis data in #297
+- feat: use profile-guided optimization in #286
+* Fri Dec 1 2023 Danie de Jager - 5.25.0-2
+- Improved SPEC to build on AL2023.
+* Tue Jun 6 2023 Danie de Jager - 5.25.0-1
+- feat: use unicode block elements in size bar in #255
+* Thu Jun 1 2023 Danie de Jager - 5.24.0-1
+- feat: add ctrl+z for job control by @yurenchen000 in #250
+- feat: upgrade dependencies by @dundee in #252
+* Thu May 11 2023 Danie de Jager - 5.23.0-2
+- Compiled with golang 1.19.9
+* Tue Apr 11 2023 Danie de Jager - 5.23.0-1
+- feat: added configuration option to change CWD when browsing directories by @leapfog in #230
+- fix: do not show help modal when confirm modal is already opened by @dundee in #237
+* Mon Feb 6 2023 Danie de Jager - 5.22.0-1
+- feat: added option to follow symlinks in #206
+- fix: ignore mouse events when modal is opened in #205
+- Updated SPEC file used for rpm creation by @daniejstriata in #198
+* Mon Jan 9 2023 Danie de Jager - 5.21.1-2
+- updated SPEC file to support builds on Fedora
+* Mon Jan 9 2023 Danie de Jager - 5.21.1-1
+- fix: correct open command for Win
+* Wed Jan 4 2023 Danie de Jager - 5.21.0-1
+- feat: mark multiple items for deletion by @dundee in #193
+- feat: move cursor to next row when marked by @dundee in #194
+- Use GNU tar on Darwin to fix build error by @sryze in #188
+* Mon Oct 24 2022 Danie de Jager - 5.20.0-1
+- feat: set default sorting using config option
+- feat: open file or directory in external program
+- fix: check reference type
+* Wed Sep 28 2022 Danie de Jager - 5.19.0-1
+- feat: upgrade all dependencies
+- feat: bump go version to 1.18
+- feat: format negative numbers correctly
+- feat: try to read config from ~/.config/gdu/gdu.yaml first
+- test: export formatting
+- docs: config file default locations
+* Sun Sep 18 2022 Danie de Jager - 5.18.1-1
+- fix: correct config file option regex
+- fix: read non-default config file properly in #175
+- feat: crop current item path to 70 chars in #173
+- feat: show elapsed time in progress modal
+- feat: configuration option for setting maximum length of the path for current item in the progress modal in #174
+* Tue Sep 13 2022 Danie de Jager - 5.17.1-1
+- fix: nul log file for Windows (#171)
+- fix: increase the vertical size of the progress modal (#172)
+- feat: added possibility to change text and background color of the selected row by @dundee in #170
+* Thu Sep 8 2022 Danie de Jager - 5.16.0-1
+- feat: support for reading (and writing) configuration to YAML file
+- feat: initial mouse support by @dundee in #165
+- add mtime for Windows by @mcoret in #157
+- openbsd fixes by @dundee in #164
+* Wed Aug 10 2022 Danie de Jager - 5.15.0-1
+- feat: show sizes as raw numbers without prefixes by @dundee in #147
+- feat: natural sorting by @dundee in #156
+- fix: honor --summarize when reading analysis by @Riatre in #149
+- fix: upgrade dependencies by @phanirithvij in #153
+- ci: generate release tarballs with vendor directory by @CyberTailor in #148
+* Mon Jul 18 2022 Danie de Jager - 5.14.0-2
+* Thu May 26 2022 Danie de Jager - 5.14.0-1
+- sort items by name if usage/size/count is the same (#143)
+* Mon Feb 21 2022 Danie de Jager - 5.13.2
+- able to go back to devices list from analyzed directory
+* Thu Feb 10 2022 Danie de Jager - 5.13.1
+- properly count only the first hard link size on a rescan
+- do not panic if path does not start with a slash
+* Sat Jan 29 2022 Danie de Jager - 5.13.0-1
+- lower memory usage
+- possibility to toggle between bar graph relative to the size of the directory or the biggest file
+- added option --si for showing sizes with decimal SI prefixes
+- fixed freeze when r key binding is being hold
+* Tue Dec 14 2021 Danie de Jager - 5.12.1-1
+- Bump to 5.12.1-1
+- fixed listing devices on NetBSD
+- escape file names (#111)
+- fixed filtering
+* Fri Dec 3 2021 Danie de Jager - 5.12.0-1
+- Bump to 5.12.0-1
+* Fri Dec 3 2021 Danie de Jager - 5.11.0-2
+- Compile with go 1.17.4
+* Sun Nov 28 2021 Danie de Jager - 5.11.0-1
+- Bump to 5.11.0
+* Tue Nov 23 2021 Danie de Jager - 5.10.1-1
+- Bump to 5.10.1
+* Wed Nov 10 2021 Danie de Jager - 5.10.0-1
+- Bump to 5.10.01
+* Mon Oct 25 2021 Danie de Jager - 5.9.0-1
+- Bump to 5.9.0
+* Mon Sep 27 2021 Danie de Jager - 5.8.1-2
+- Remove pandoc requirement.
+* Sun Sep 26 2021 Danie de Jager - 5.8.1-1
+- Bump to 5.8.1
+* Thu Sep 23 2021 Danie de Jager - 5.8.0-2
+- Bump to 5.8.0
+* Tue Sep 7 2021 Danie de Jager - 5.7.0-1
+- Bump to 5.7.0
+* Sat Aug 28 2021 Danie de Jager - 5.6.2-1
+- Bump to 5.6.2
+- Compiled with go 1.17
+* Fri Aug 27 2021 Danie de Jager - 5.6.1-1
+- Bump to 5.6.1
+* Mon Aug 23 2021 Danie de Jager - 5.6.0-1
+- Bump to 5.6.0
+* Fri Aug 13 2021 Danie de Jager - 5.5.0-2
+- Compiled with go 1.16.7
+* Mon Aug 2 2021 Danie de Jager - 5.5.0-1
+- Bump to 5.5.0
+* Mon Jul 26 2021 Danie de Jager - 5.4.0-1
+- Bump to 5.4.0
+* Thu Jul 22 2021 Danie de Jager - 5.3.0-2
+- First release
--- /dev/null
+module github.com/dundee/gdu/v5
+
+go 1.24.0
+
+require (
+ github.com/dgraph-io/badger/v4 v4.9.1
+ github.com/fatih/color v1.18.0
+ github.com/gdamore/tcell/v2 v2.13.8
+ github.com/h2non/filetype v1.1.3
+ github.com/maruel/natural v1.3.0
+ github.com/mattn/go-isatty v0.0.20
+ github.com/pkg/errors v0.9.1
+ github.com/rivo/tview v0.42.0
+ github.com/sirupsen/logrus v1.9.4
+ github.com/spf13/cobra v1.10.2
+ github.com/stretchr/testify v1.11.1
+ github.com/ulikunitz/xz v0.5.15
+ golang.org/x/sys v0.41.0
+ golang.org/x/text v0.34.0
+ gopkg.in/yaml.v3 v3.0.1
+ modernc.org/sqlite v1.46.1
+)
+
+require (
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/dgraph-io/ristretto/v2 v2.3.0 // indirect
+ github.com/dustin/go-humanize v1.0.1 // indirect
+ github.com/gdamore/encoding v1.0.1 // indirect
+ github.com/go-logr/logr v1.4.3 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/google/flatbuffers v25.9.23+incompatible // indirect
+ github.com/google/uuid v1.6.0 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/klauspost/compress v1.18.1 // indirect
+ github.com/lucasb-eyer/go-colorful v1.3.0 // indirect
+ github.com/mattn/go-colorable v0.1.14 // indirect
+ github.com/ncruces/go-strftime v1.0.0 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
+ github.com/rivo/uniseg v0.4.7 // indirect
+ github.com/spf13/pflag v1.0.10 // indirect
+ go.opentelemetry.io/auto/sdk v1.2.1 // indirect
+ go.opentelemetry.io/otel v1.38.0 // indirect
+ go.opentelemetry.io/otel/metric v1.38.0 // indirect
+ go.opentelemetry.io/otel/trace v1.38.0 // indirect
+ golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
+ golang.org/x/net v0.47.0 // indirect
+ golang.org/x/term v0.37.0 // indirect
+ google.golang.org/protobuf v1.36.10 // indirect
+ modernc.org/libc v1.67.6 // indirect
+ modernc.org/mathutil v1.7.1 // indirect
+ modernc.org/memory v1.11.0 // indirect
+)
--- /dev/null
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgraph-io/badger/v4 v4.9.1 h1:DocZXZkg5JJHJPtUErA0ibyHxOVUDVoXLSCV6t8NC8w=
+github.com/dgraph-io/badger/v4 v4.9.1/go.mod h1:5/MEx97uzdPUHR4KtkNt8asfI2T4JiEiQlV7kWUo8c0=
+github.com/dgraph-io/ristretto/v2 v2.3.0 h1:qTQ38m7oIyd4GAed/QkUZyPFNMnvVWyazGXRwvOt5zk=
+github.com/dgraph-io/ristretto/v2 v2.3.0/go.mod h1:gpoRV3VzrEY1a9dWAYV6T1U7YzfgttXdd/ZzL1s9OZM=
+github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38=
+github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
+github.com/gdamore/encoding v1.0.1 h1:YzKZckdBL6jVt2Gc+5p82qhrGiqMdG/eNs6Wy0u3Uhw=
+github.com/gdamore/encoding v1.0.1/go.mod h1:0Z0cMFinngz9kS1QfMjCP8TY7em3bZYeeklsSDPivEo=
+github.com/gdamore/tcell/v2 v2.13.8 h1:Mys/Kl5wfC/GcC5Cx4C2BIQH9dbnhnkPgS9/wF3RlfU=
+github.com/gdamore/tcell/v2 v2.13.8/go.mod h1:+Wfe208WDdB7INEtCsNrAN6O2m+wsTPk1RAovjaILlo=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/google/flatbuffers v25.9.23+incompatible h1:rGZKv+wOb6QPzIdkM2KxhBZCDrA0DeN6DNmRDrqIsQU=
+github.com/google/flatbuffers v25.9.23+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
+github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg=
+github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
+github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag=
+github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
+github.com/maruel/natural v1.3.0 h1:VsmCsBmEyrR46RomtgHs5hbKADGRVtliHTyCOLFBpsg=
+github.com/maruel/natural v1.3.0/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg=
+github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
+github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
+github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/rivo/tview v0.42.0 h1:b/ftp+RxtDsHSaynXTbJb+/n/BxDEi+W3UfF5jILK6c=
+github.com/rivo/tview v0.42.0/go.mod h1:cSfIYfhpSGCjp3r/ECJb+GKS7cGJnqV8vfjQPwoXyfY=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
+github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
+github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
+github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
+github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
+github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
+github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY=
+github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
+go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
+go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
+go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
+go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
+go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
+go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY=
+golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
+golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
+golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
+golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
+golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
+golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
+golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
+golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
+google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
+modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
+modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=
+modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM=
+modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=
+modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
+modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
+modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
+modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE=
+modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=
+modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
+modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
+modernc.org/libc v1.67.6 h1:eVOQvpModVLKOdT+LvBPjdQqfrZq+pC39BygcT+E7OI=
+modernc.org/libc v1.67.6/go.mod h1:JAhxUVlolfYDErnwiqaLvUqc8nfb2r6S6slAgZOnaiE=
+modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
+modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
+modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
+modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
+modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
+modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
+modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
+modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
+modernc.org/sqlite v1.46.1 h1:eFJ2ShBLIEnUWlLy12raN0Z1plqmFX9Qe3rjQTKt6sU=
+modernc.org/sqlite v1.46.1/go.mod h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA=
+modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
+modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
+modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
+modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
--- /dev/null
+// Package common contains commong logic and interfaces used across Gdu
+// nolint: revive //Why: this is common package
+package common
+
+import (
+ "time"
+
+ "github.com/dundee/gdu/v5/pkg/fs"
+)
+
+// CurrentProgress struct
+type CurrentProgress struct {
+ CurrentItemName string
+ ItemCount int64
+ TotalSize int64
+}
+
+// ShouldDirBeIgnored whether path should be ignored
+type ShouldDirBeIgnored func(name, path string) bool
+
+// ShouldFileBeIgnored whether file should be ignored based on type
+type ShouldFileBeIgnored func(name string) bool
+
+// Analyzer is type for dir analyzing function
+type Analyzer interface {
+ AnalyzeDir(path string, ignore ShouldDirBeIgnored, fileTypeFilter ShouldFileBeIgnored) fs.Item
+ SetFollowSymlinks(bool)
+ SetShowAnnexedSize(bool)
+ SetTimeFilter(timeFilter TimeFilter)
+ SetArchiveBrowsing(bool)
+ SetFileTypeFilter(filter ShouldFileBeIgnored)
+ GetProgressChan() chan CurrentProgress
+ GetDone() SignalGroup
+ ResetProgress()
+}
+
+// TimeFilter represents a function that determines if a file should be included based on its mtime
+type TimeFilter func(mtime time.Time) bool
--- /dev/null
+// Package common contains commong logic and interfaces used across Gdu
+// nolint: revive //Why: this is common package
+package common
+
+import (
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+// TermApplication is interface for the terminal UI app
+type TermApplication interface {
+ Run() error
+ Stop()
+ Suspend(f func()) bool
+ SetRoot(root tview.Primitive, fullscreen bool) *tview.Application
+ SetFocus(p tview.Primitive) *tview.Application
+ SetInputCapture(capture func(event *tcell.EventKey) *tcell.EventKey) *tview.Application
+ SetMouseCapture(
+ capture func(event *tcell.EventMouse, action tview.MouseAction) (*tcell.EventMouse, tview.MouseAction),
+ ) *tview.Application
+ QueueUpdateDraw(f func()) *tview.Application
+ SetBeforeDrawFunc(func(screen tcell.Screen) bool) *tview.Application
+}
--- /dev/null
+package common
+
+// Is64Bit returns true if the system is 64-bit
+const Is64Bit = (^uint(0) >> 63) == 1
--- /dev/null
+// Package common contains commong logic and interfaces used across Gdu
+// nolint: revive //Why: this is common package
+package common
+
+import (
+ "bufio"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ log "github.com/sirupsen/logrus"
+)
+
+// CreateIgnorePattern creates one pattern from all path patterns
+func CreateIgnorePattern(paths []string) (compiled *regexp.Regexp, err error) {
+ for i, path := range paths {
+ if _, err = regexp.Compile(path); err != nil {
+ return nil, err
+ }
+ if !filepath.IsAbs(path) {
+ absPath, err := filepath.Abs(path)
+ if err == nil {
+ paths = append(paths, absPath)
+ }
+ } else {
+ relPath, err := filepath.Rel("/", path)
+ if err == nil {
+ paths = append(paths, relPath)
+ }
+ }
+ paths[i] = "(" + path + ")"
+ }
+
+ ignore := `^` + strings.Join(paths, "|") + `$`
+ return regexp.Compile(ignore)
+}
+
+// SetIgnoreDirPaths sets paths to ignore
+func (ui *UI) SetIgnoreDirPaths(paths []string) {
+ log.Printf("Ignoring dirs %s", strings.Join(paths, ", "))
+ ui.IgnoreDirPaths = make(map[string]struct{}, len(paths)*2)
+ for _, path := range paths {
+ ui.IgnoreDirPaths[path] = struct{}{}
+ if !filepath.IsAbs(path) {
+ if absPath, err := filepath.Abs(path); err == nil {
+ ui.IgnoreDirPaths[absPath] = struct{}{}
+ }
+ } else {
+ if relPath, err := filepath.Rel("/", path); err == nil {
+ ui.IgnoreDirPaths[relPath] = struct{}{}
+ }
+ }
+ }
+}
+
+// SetIgnoreDirPatterns sets regular patterns of dirs to ignore
+func (ui *UI) SetIgnoreDirPatterns(paths []string) error {
+ var err error
+ log.Printf("Ignoring dir patterns %s", strings.Join(paths, ", "))
+ ui.IgnoreDirPathPatterns, err = CreateIgnorePattern(paths)
+ return err
+}
+
+// SetIgnoreFromFile sets regular patterns of dirs to ignore
+func (ui *UI) SetIgnoreFromFile(ignoreFile string) error {
+ var err error
+ var paths []string
+ log.Printf("Reading ignoring dir patterns from file '%s'", ignoreFile)
+
+ file, err := os.Open(ignoreFile)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ scanner := bufio.NewScanner(file)
+ for scanner.Scan() {
+ paths = append(paths, scanner.Text())
+ }
+
+ if err := scanner.Err(); err != nil {
+ return err
+ }
+
+ ui.IgnoreDirPathPatterns, err = CreateIgnorePattern(paths)
+ return err
+}
+
+// SetIgnoreTypes sets file types to ignore
+func (ui *UI) SetIgnoreTypes(types []string) {
+ log.Printf("Ignoring file types: %s", strings.Join(types, ", "))
+ ui.IgnoreTypes = types
+}
+
+// SetIncludeTypes sets file types to include (whitelist)
+func (ui *UI) SetIncludeTypes(types []string) {
+ log.Printf("Including only file types: %s", strings.Join(types, ", "))
+ ui.IncludeTypes = types
+}
+
+// SetIgnoreHidden sets flags if hidden dirs should be ignored
+func (ui *UI) SetIgnoreHidden(value bool) {
+ log.Printf("Ignoring hidden dirs")
+ ui.IgnoreHidden = value
+}
+
+// ShouldDirBeIgnored returns true if given path should be ignored
+func (ui *UI) ShouldDirBeIgnored(name, path string) bool {
+ _, shouldIgnore := ui.IgnoreDirPaths[path]
+ if shouldIgnore {
+ log.Printf("Directory %s ignored", path)
+ }
+ return shouldIgnore
+}
+
+// ShouldDirBeIgnoredUsingPattern returns true if given path should be ignored
+func (ui *UI) ShouldDirBeIgnoredUsingPattern(name, path string) bool {
+ shouldIgnore := ui.IgnoreDirPathPatterns.MatchString(path)
+ if shouldIgnore {
+ log.Printf("Directory %s ignored", path)
+ }
+ return shouldIgnore
+}
+
+// IsHiddenDir returns if the dir name begins with dot
+func (ui *UI) IsHiddenDir(name, path string) bool {
+ shouldIgnore := name[0] == '.'
+ if shouldIgnore {
+ log.Printf("Directory %s ignored", path)
+ }
+ return shouldIgnore
+}
+
+// ShouldFileBeIgnoredByType returns true if file should be ignored based on its extension
+func (ui *UI) ShouldFileBeIgnoredByType(name string) bool {
+ if len(ui.IgnoreTypes) == 0 {
+ return false
+ }
+
+ ext := strings.ToLower(filepath.Ext(name))
+ if ext == "" {
+ return false // No extension, don't ignore
+ }
+
+ // Remove leading dot from extension
+ ext = strings.TrimPrefix(ext, ".")
+
+ for _, ignoreType := range ui.IgnoreTypes {
+ // Remove leading dot from ignoreType
+ cleanIgnoreType := strings.TrimPrefix(strings.ToLower(ignoreType), ".")
+ if cleanIgnoreType == ext {
+ log.Printf("File %s ignored by type", name)
+ return true
+ }
+ }
+ return false
+}
+
+// ShouldFileBeIncludedByType returns true if file should be included based on its extension
+func (ui *UI) ShouldFileBeIncludedByType(name string) bool {
+ if len(ui.IncludeTypes) == 0 {
+ return true // No include filter, include all
+ }
+
+ ext := strings.ToLower(filepath.Ext(name))
+ if ext == "" {
+ return false // No extension, don't include if we have include filter
+ }
+
+ // Remove leading dot from extension
+ ext = strings.TrimPrefix(ext, ".")
+
+ for _, includeType := range ui.IncludeTypes {
+ // Remove leading dot from includeType
+ cleanIncludeType := strings.TrimPrefix(strings.ToLower(includeType), ".")
+ if cleanIncludeType == ext {
+ return true
+ }
+ }
+
+ log.Printf("File %s excluded by type filter", name)
+ return false
+}
+
+// CreateIgnoreFunc returns function for detecting if dir should be ignored
+// nolint: gocyclo // Why: This function is a switch statement that is not too complex
+func (ui *UI) CreateIgnoreFunc() ShouldDirBeIgnored {
+ switch {
+ case len(ui.IgnoreDirPaths) > 0 && ui.IgnoreDirPathPatterns == nil && !ui.IgnoreHidden:
+ return ui.ShouldDirBeIgnored
+ case len(ui.IgnoreDirPaths) > 0 && ui.IgnoreDirPathPatterns != nil && !ui.IgnoreHidden:
+ return func(name, path string) bool {
+ return ui.ShouldDirBeIgnored(name, path) || ui.ShouldDirBeIgnoredUsingPattern(name, path)
+ }
+ case len(ui.IgnoreDirPaths) > 0 && ui.IgnoreDirPathPatterns != nil && ui.IgnoreHidden:
+ return func(name, path string) bool {
+ return ui.ShouldDirBeIgnored(name, path) || ui.ShouldDirBeIgnoredUsingPattern(name, path) || ui.IsHiddenDir(name, path)
+ }
+ case len(ui.IgnoreDirPaths) == 0 && ui.IgnoreDirPathPatterns != nil && ui.IgnoreHidden:
+ return func(name, path string) bool {
+ return ui.ShouldDirBeIgnoredUsingPattern(name, path) || ui.IsHiddenDir(name, path)
+ }
+ case len(ui.IgnoreDirPaths) == 0 && ui.IgnoreDirPathPatterns != nil && !ui.IgnoreHidden:
+ return ui.ShouldDirBeIgnoredUsingPattern
+ case len(ui.IgnoreDirPaths) == 0 && ui.IgnoreDirPathPatterns == nil && ui.IgnoreHidden:
+ return ui.IsHiddenDir
+ case len(ui.IgnoreDirPaths) > 0 && ui.IgnoreDirPathPatterns == nil && ui.IgnoreHidden:
+ return func(name, path string) bool {
+ return ui.ShouldDirBeIgnored(name, path) || ui.IsHiddenDir(name, path)
+ }
+ default:
+ return func(name, path string) bool { return false }
+ }
+}
+
+// CreateFileTypeFilter returns function for detecting if file should be ignored based on type
+func (ui *UI) CreateFileTypeFilter() ShouldFileBeIgnored {
+ // If we have include types, use whitelist mode
+ if len(ui.IncludeTypes) > 0 {
+ return func(name string) bool {
+ return !ui.ShouldFileBeIncludedByType(name)
+ }
+ }
+
+ // If we have ignore types, use blacklist mode
+ if len(ui.IgnoreTypes) > 0 {
+ return func(name string) bool {
+ return ui.ShouldFileBeIgnoredByType(name)
+ }
+ }
+
+ // No type filtering - return nil to indicate no filtering is needed
+ return nil
+}
--- /dev/null
+package common_test
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ log "github.com/sirupsen/logrus"
+
+ "github.com/dundee/gdu/v5/internal/common"
+ "github.com/stretchr/testify/assert"
+)
+
+func init() {
+ log.SetLevel(log.WarnLevel)
+}
+
+func TestCreateIgnorePattern(t *testing.T) {
+ re, err := common.CreateIgnorePattern([]string{"[abc]+"})
+
+ assert.Nil(t, err)
+ assert.True(t, re.MatchString("aa"))
+}
+
+func TestCreateIgnorePatternWithErr(t *testing.T) {
+ re, err := common.CreateIgnorePattern([]string{"[[["})
+
+ assert.NotNil(t, err)
+ assert.Nil(t, re)
+}
+
+func TestEmptyIgnore(t *testing.T) {
+ ui := &common.UI{}
+ shouldBeIgnored := ui.CreateIgnoreFunc()
+
+ assert.False(t, shouldBeIgnored("abc", "/abc"))
+ assert.False(t, shouldBeIgnored("xxx", "/xxx"))
+}
+
+func TestIgnoreByAbsPath(t *testing.T) {
+ ui := &common.UI{}
+ ui.SetIgnoreDirPaths([]string{"/abc"})
+ shouldBeIgnored := ui.CreateIgnoreFunc()
+
+ assert.True(t, shouldBeIgnored("abc", "/abc"))
+ assert.False(t, shouldBeIgnored("xxx", "/xxx"))
+}
+
+func TestIgnoreByPattern(t *testing.T) {
+ ui := &common.UI{}
+ err := ui.SetIgnoreDirPatterns([]string{"/[abc]+"})
+ assert.Nil(t, err)
+ shouldBeIgnored := ui.CreateIgnoreFunc()
+
+ assert.True(t, shouldBeIgnored("aaa", "/aaa"))
+ assert.True(t, shouldBeIgnored("aaa", "/aaabc"))
+ assert.False(t, shouldBeIgnored("xxx", "/xxx"))
+}
+
+func TestIgnoreFromFile(t *testing.T) {
+ file, err := os.OpenFile("ignore", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
+ if err != nil {
+ panic(err)
+ }
+ defer file.Close()
+
+ if _, err := file.WriteString("/aaa\n"); err != nil {
+ panic(err)
+ }
+ if _, err := file.WriteString("/aaabc\n"); err != nil {
+ panic(err)
+ }
+ if _, err := file.WriteString("/[abd]+\n"); err != nil {
+ panic(err)
+ }
+
+ ui := &common.UI{}
+ err = ui.SetIgnoreFromFile("ignore")
+ assert.Nil(t, err)
+ shouldBeIgnored := ui.CreateIgnoreFunc()
+
+ assert.True(t, shouldBeIgnored("aaa", "/aaa"))
+ assert.True(t, shouldBeIgnored("aaabc", "/aaabc"))
+ assert.True(t, shouldBeIgnored("aaabd", "/aaabd"))
+ assert.False(t, shouldBeIgnored("xxx", "/xxx"))
+}
+
+func TestIgnoreFromNotExistingFile(t *testing.T) {
+ ui := &common.UI{}
+ err := ui.SetIgnoreFromFile("xxx")
+ assert.NotNil(t, err)
+}
+
+func TestIgnoreHidden(t *testing.T) {
+ ui := &common.UI{}
+ ui.SetIgnoreHidden(true)
+ shouldBeIgnored := ui.CreateIgnoreFunc()
+
+ assert.True(t, shouldBeIgnored(".git", "/aaa/.git"))
+ assert.True(t, shouldBeIgnored(".bbb", "/aaa/.bbb"))
+ assert.False(t, shouldBeIgnored("xxx", "/xxx"))
+}
+
+func TestIgnoreByAbsPathAndHidden(t *testing.T) {
+ ui := &common.UI{}
+ ui.SetIgnoreDirPaths([]string{"/abc"})
+ ui.SetIgnoreHidden(true)
+ shouldBeIgnored := ui.CreateIgnoreFunc()
+
+ assert.True(t, shouldBeIgnored("abc", "/abc"))
+ assert.True(t, shouldBeIgnored(".git", "/aaa/.git"))
+ assert.True(t, shouldBeIgnored(".bbb", "/aaa/.bbb"))
+ assert.False(t, shouldBeIgnored("xxx", "/xxx"))
+}
+
+func TestIgnoreByAbsPathAndPattern(t *testing.T) {
+ ui := &common.UI{}
+ ui.SetIgnoreDirPaths([]string{"/abc"})
+ err := ui.SetIgnoreDirPatterns([]string{"/[abc]+"})
+ assert.Nil(t, err)
+ shouldBeIgnored := ui.CreateIgnoreFunc()
+
+ assert.True(t, shouldBeIgnored("abc", "/abc"))
+ assert.True(t, shouldBeIgnored("aabc", "/aabc"))
+ assert.True(t, shouldBeIgnored("ccc", "/ccc"))
+ assert.False(t, shouldBeIgnored("xxx", "/xxx"))
+}
+
+func TestIgnoreByPatternAndHidden(t *testing.T) {
+ ui := &common.UI{}
+ err := ui.SetIgnoreDirPatterns([]string{"/[abc]+"})
+ assert.Nil(t, err)
+ ui.SetIgnoreHidden(true)
+ shouldBeIgnored := ui.CreateIgnoreFunc()
+
+ assert.True(t, shouldBeIgnored("abbc", "/abbc"))
+ assert.True(t, shouldBeIgnored(".git", "/aaa/.git"))
+ assert.True(t, shouldBeIgnored(".bbb", "/aaa/.bbb"))
+ assert.False(t, shouldBeIgnored("xxx", "/xxx"))
+}
+
+func TestIgnoreByAll(t *testing.T) {
+ ui := &common.UI{}
+ ui.SetIgnoreDirPaths([]string{"/abc"})
+ err := ui.SetIgnoreDirPatterns([]string{"/[abc]+"})
+ assert.Nil(t, err)
+ ui.SetIgnoreHidden(true)
+ shouldBeIgnored := ui.CreateIgnoreFunc()
+
+ assert.True(t, shouldBeIgnored("abc", "/abc"))
+ assert.True(t, shouldBeIgnored("aabc", "/aabc"))
+ assert.True(t, shouldBeIgnored(".git", "/aaa/.git"))
+ assert.True(t, shouldBeIgnored(".bbb", "/aaa/.bbb"))
+ assert.False(t, shouldBeIgnored("xxx", "/xxx"))
+}
+
+func TestIgnoreByRelativePath(t *testing.T) {
+ ui := &common.UI{}
+ ui.SetIgnoreDirPaths([]string{"test_dir/abc"})
+ shouldBeIgnored := ui.CreateIgnoreFunc()
+
+ assert.True(t, shouldBeIgnored("abc", "test_dir/abc"))
+ absPath, err := filepath.Abs("test_dir/abc")
+ assert.Nil(t, err)
+ assert.True(t, shouldBeIgnored("abc", absPath))
+ assert.False(t, shouldBeIgnored("xxx", "test_dir/xxx"))
+}
+
+func TestIgnoreByRelativePattern(t *testing.T) {
+ ui := &common.UI{}
+ err := ui.SetIgnoreDirPatterns([]string{"test_dir/[abc]+"})
+ assert.Nil(t, err)
+ shouldBeIgnored := ui.CreateIgnoreFunc()
+
+ assert.True(t, shouldBeIgnored("abc", "test_dir/abc"))
+ absPath, err := filepath.Abs("test_dir/abc")
+ assert.Nil(t, err)
+ assert.True(t, shouldBeIgnored("abc", absPath))
+ assert.False(t, shouldBeIgnored("xxx", "test_dir/xxx"))
+}
+
+func TestIgnoreFromFileWithRelativePaths(t *testing.T) {
+ file, err := os.OpenFile("ignore", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
+ if err != nil {
+ panic(err)
+ }
+ defer file.Close()
+ defer os.Remove("ignore")
+
+ if _, err := file.WriteString("test_dir/aaa\n"); err != nil {
+ panic(err)
+ }
+ if _, err := file.WriteString("node_modules/[^/]+\n"); err != nil {
+ panic(err)
+ }
+
+ ui := &common.UI{}
+ err = ui.SetIgnoreFromFile("ignore")
+ assert.Nil(t, err)
+ shouldBeIgnored := ui.CreateIgnoreFunc()
+
+ assert.True(t, shouldBeIgnored("aaa", "test_dir/aaa"))
+ absPath, err := filepath.Abs("test_dir/aaa")
+ assert.Nil(t, err)
+ assert.True(t, shouldBeIgnored("aaa", absPath))
+ assert.False(t, shouldBeIgnored("xxx", "test_dir/xxx"))
+}
+
+func TestShouldFileBeIgnoredByType(t *testing.T) {
+ tests := []struct {
+ name string
+ ignoreTypes []string
+ filename string
+ expectedIgnored bool
+ }{
+ {
+ name: "no ignore types",
+ ignoreTypes: []string{},
+ filename: "test.yaml",
+ expectedIgnored: false,
+ },
+ {
+ name: "ignore yaml",
+ ignoreTypes: []string{"yaml"},
+ filename: "test.yaml",
+ expectedIgnored: true,
+ },
+ {
+ name: "ignore json",
+ ignoreTypes: []string{"json"},
+ filename: "test.json",
+ expectedIgnored: true,
+ },
+ {
+ name: "ignore multiple types",
+ ignoreTypes: []string{"yaml", "json"},
+ filename: "test.yaml",
+ expectedIgnored: true,
+ },
+ {
+ name: "ignore multiple types - not matched",
+ ignoreTypes: []string{"yaml", "json"},
+ filename: "test.txt",
+ expectedIgnored: false,
+ },
+
+ {
+ name: "ignore with uppercase",
+ ignoreTypes: []string{"YAML"},
+ filename: "test.yaml",
+ expectedIgnored: true,
+ },
+ {
+ name: "ignore file without extension",
+ ignoreTypes: []string{"yaml"},
+ filename: "test",
+ expectedIgnored: false,
+ },
+ {
+ name: "ignore with dot in extension",
+ ignoreTypes: []string{".yaml"},
+ filename: "test.yaml",
+ expectedIgnored: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ui := &common.UI{}
+ ui.SetIgnoreTypes(tt.ignoreTypes)
+
+ actual := ui.ShouldFileBeIgnoredByType(tt.filename)
+ assert.Equal(t, tt.expectedIgnored, actual)
+ })
+ }
+}
+
+func TestShouldFileBeIncludedByType(t *testing.T) {
+ tests := []struct {
+ name string
+ includeTypes []string
+ filename string
+ expectedIncluded bool
+ }{
+ {
+ name: "no include types",
+ includeTypes: []string{},
+ filename: "test.yaml",
+ expectedIncluded: true,
+ },
+ {
+ name: "include yaml",
+ includeTypes: []string{"yaml"},
+ filename: "test.yaml",
+ expectedIncluded: true,
+ },
+ {
+ name: "include json",
+ includeTypes: []string{"json"},
+ filename: "test.json",
+ expectedIncluded: true,
+ },
+ {
+ name: "include multiple types",
+ includeTypes: []string{"yaml", "json"},
+ filename: "test.yaml",
+ expectedIncluded: true,
+ },
+ {
+ name: "include multiple types - not matched",
+ includeTypes: []string{"yaml", "json"},
+ filename: "test.txt",
+ expectedIncluded: false,
+ },
+
+ {
+ name: "include with uppercase",
+ includeTypes: []string{"YAML"},
+ filename: "test.yaml",
+ expectedIncluded: true,
+ },
+ {
+ name: "include file without extension",
+ includeTypes: []string{"yaml"},
+ filename: "test",
+ expectedIncluded: false,
+ },
+ {
+ name: "include with dot in extension",
+ includeTypes: []string{".yaml"},
+ filename: "test.yaml",
+ expectedIncluded: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ui := &common.UI{}
+ ui.SetIncludeTypes(tt.includeTypes)
+
+ actual := ui.ShouldFileBeIncludedByType(tt.filename)
+ assert.Equal(t, tt.expectedIncluded, actual)
+ })
+ }
+}
+
+func TestCreateFileTypeFilter(t *testing.T) {
+ tests := []struct {
+ name string
+ includeTypes []string
+ ignoreTypes []string
+ filename string
+ expectedFiltered bool
+ }{
+ {
+ name: "no filters",
+ includeTypes: []string{},
+ ignoreTypes: []string{},
+ filename: "test.yaml",
+ expectedFiltered: false,
+ },
+ {
+ name: "include filter - matched",
+ includeTypes: []string{"yaml"},
+ ignoreTypes: []string{},
+ filename: "test.yaml",
+ expectedFiltered: false,
+ },
+ {
+ name: "include filter - not matched",
+ includeTypes: []string{"json"},
+ ignoreTypes: []string{},
+ filename: "test.yaml",
+ expectedFiltered: true,
+ },
+ {
+ name: "ignore filter - matched",
+ includeTypes: []string{},
+ ignoreTypes: []string{"yaml"},
+ filename: "test.yaml",
+ expectedFiltered: true,
+ },
+ {
+ name: "ignore filter - not matched",
+ includeTypes: []string{},
+ ignoreTypes: []string{"json"},
+ filename: "test.yaml",
+ expectedFiltered: false,
+ },
+ {
+ name: "include filter takes precedence",
+ includeTypes: []string{"yaml"},
+ ignoreTypes: []string{"yaml"},
+ filename: "test.yaml",
+ expectedFiltered: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ui := &common.UI{}
+ ui.SetIncludeTypes(tt.includeTypes)
+ ui.SetIgnoreTypes(tt.ignoreTypes)
+
+ filter := ui.CreateFileTypeFilter()
+ var actual bool
+ if filter == nil {
+ // When filter is nil, no filtering is applied, so file should not be filtered
+ actual = false
+ } else {
+ actual = filter(tt.filename)
+ }
+ assert.Equal(t, tt.expectedFiltered, actual)
+ })
+ }
+}
+
+func TestFileTypeFilterWithRealFiles(t *testing.T) {
+ // Create a temporary directory with test files
+ tmpDir := t.TempDir()
+
+ // Create test files
+ testFiles := []struct {
+ name string
+ content string
+ expected bool // expected to be included
+ }{
+ {"test.yaml", "key: value", true},
+ {"test.json", "{\"key\": \"value\"}", true},
+ {"test.txt", "plain text", false},
+ {"test.go", "package main", false},
+ {"noextension", "no extension", false},
+ }
+
+ for _, tf := range testFiles {
+ filePath := filepath.Join(tmpDir, tf.name)
+ err := os.WriteFile(filePath, []byte(tf.content), 0644)
+ assert.NoError(t, err)
+ }
+
+ // Test include filter
+ ui := &common.UI{}
+ ui.SetIncludeTypes([]string{"yaml", "json"})
+ filter := ui.CreateFileTypeFilter()
+
+ for _, tf := range testFiles {
+ actual := filter(tf.name)
+ expected := !tf.expected // filter returns true if file should be filtered out
+ assert.Equal(t, expected, actual, "Failed for file: %s", tf.name)
+ }
+
+ // Test ignore filter
+ ui2 := &common.UI{}
+ ui2.SetIgnoreTypes([]string{"txt", "go"})
+ filter2 := ui2.CreateFileTypeFilter()
+
+ for _, tf := range testFiles {
+ actual := filter2(tf.name)
+ // For ignore filter, yaml and json should not be filtered, txt and go should be filtered
+ expected := tf.name == "test.txt" || tf.name == "test.go"
+ assert.Equal(t, expected, actual, "Failed for file: %s", tf.name)
+ }
+}
+
+func TestCreateFileTypeFilterReturnsNilWhenNoFiltering(t *testing.T) {
+ ui := &common.UI{}
+ // No include or ignore types set
+ filter := ui.CreateFileTypeFilter()
+ assert.Nil(t, filter, "CreateFileTypeFilter should return nil when no filtering is configured")
+}
--- /dev/null
+// Package common contains commong logic and interfaces used across Gdu
+// nolint: revive //Why: this is common package
+package common
+
+type SignalGroup chan struct{}
+
+func (s SignalGroup) Wait() {
+ <-s
+}
+
+func (s SignalGroup) Broadcast() {
+ close(s)
+}
--- /dev/null
+// Package common contains commong logic and interfaces used across Gdu
+// nolint: revive //Why: this is common package
+package common
+
+import (
+ "regexp"
+ "strconv"
+)
+
+// UI struct
+type UI struct {
+ Analyzer Analyzer
+ IgnoreDirPaths map[string]struct{}
+ IgnoreDirPathPatterns *regexp.Regexp
+ IgnoreHidden bool
+ IgnoreTypes []string
+ IncludeTypes []string
+ UseColors bool
+ UseSIPrefix bool
+ ShowProgress bool
+ ShowApparentSize bool
+ ShowRelativeSize bool
+}
+
+// SetAnalyzer sets analyzer instance
+func (ui *UI) SetAnalyzer(a Analyzer) {
+ ui.Analyzer = a
+}
+
+// SetFollowSymlinks sets whether symlinks to files should be followed
+func (ui *UI) SetFollowSymlinks(v bool) {
+ ui.Analyzer.SetFollowSymlinks(v)
+}
+
+// SetShowAnnexedSize sets whether to use annexed size of git-annex files
+func (ui *UI) SetShowAnnexedSize(v bool) {
+ ui.Analyzer.SetShowAnnexedSize(v)
+}
+
+// SetTimeFilter sets the time filter function for file inclusion
+func (ui *UI) SetTimeFilter(timeFilter TimeFilter) {
+ ui.Analyzer.SetTimeFilter(timeFilter)
+}
+
+// SetArchiveBrowsing sets whether browsing of zip/jar archives is enabled
+func (ui *UI) SetArchiveBrowsing(v bool) {
+ ui.Analyzer.SetArchiveBrowsing(v)
+}
+
+// binary multiplies prefixes (IEC)
+const (
+ _ float64 = 1 << (10 * iota)
+ Ki
+ Mi
+ Gi
+ Ti
+ Pi
+ Ei
+)
+
+// SI prefixes
+const (
+ K float64 = 1e3
+ M float64 = 1e6
+ G float64 = 1e9
+ T float64 = 1e12
+ P float64 = 1e15
+ E float64 = 1e18
+)
+
+// FormatNumber returns number as a string with thousands separator
+func FormatNumber(n int64) string {
+ in := []byte(strconv.FormatInt(n, 10))
+
+ var out []byte
+ if i := len(in) % 3; i != 0 {
+ if out, in = append(out, in[:i]...), in[i:]; len(in) > 0 {
+ out = append(out, ',')
+ }
+ }
+ for len(in) > 0 {
+ if out, in = append(out, in[:3]...), in[3:]; len(in) > 0 {
+ out = append(out, ',')
+ }
+ }
+ return string(out)
+}
--- /dev/null
+// Package common contains commong logic and interfaces used across Gdu
+// nolint: revive //Why: this is common package
+package common
+
+import (
+ "testing"
+
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestFormatNumber(t *testing.T) {
+ res := FormatNumber(1234567890)
+ assert.Equal(t, "1,234,567,890", res)
+}
+
+func TestSetFollowSymlinks(t *testing.T) {
+ ui := UI{
+ Analyzer: &MockedAnalyzer{},
+ }
+ ui.SetFollowSymlinks(true)
+
+ assert.Equal(t, true, ui.Analyzer.(*MockedAnalyzer).FollowSymlinks)
+}
+
+func TestSetShowAnnexedSize(t *testing.T) {
+ ui := UI{
+ Analyzer: &MockedAnalyzer{},
+ }
+ ui.SetShowAnnexedSize(true)
+
+ assert.Equal(t, true, ui.Analyzer.(*MockedAnalyzer).ShowAnnexedSize)
+}
+
+func TestSetEnableArchiveBrowsing(t *testing.T) {
+ ui := UI{
+ Analyzer: &MockedAnalyzer{},
+ }
+ ui.SetArchiveBrowsing(true)
+
+ assert.Equal(t, true, ui.Analyzer.(*MockedAnalyzer).ArchiveBrowsing)
+}
+
+type MockedAnalyzer struct {
+ FollowSymlinks bool
+ ShowAnnexedSize bool
+ ArchiveBrowsing bool
+}
+
+// SetFileTypeFilter sets the file type filter function
+func (a *MockedAnalyzer) SetFileTypeFilter(filter ShouldFileBeIgnored) {
+ // Mock implementation - do nothing
+}
+
+// AnalyzeDir returns dir with files with different size exponents
+func (a *MockedAnalyzer) AnalyzeDir(
+ path string, ignore ShouldDirBeIgnored, fileTypeFilter ShouldFileBeIgnored,
+) fs.Item {
+ return nil
+}
+
+// GetProgressChan returns always Done
+func (a *MockedAnalyzer) GetProgressChan() chan CurrentProgress {
+ return make(chan CurrentProgress)
+}
+
+// GetDone returns always Done
+func (a *MockedAnalyzer) GetDone() SignalGroup {
+ c := make(SignalGroup)
+ defer c.Broadcast()
+ return c
+}
+
+// ResetProgress does nothing
+func (a *MockedAnalyzer) ResetProgress() {}
+
+// SetFollowSymlinks does nothing
+func (a *MockedAnalyzer) SetFollowSymlinks(v bool) {
+ a.FollowSymlinks = v
+}
+
+// SetShowAnnexedSize does nothing
+func (a *MockedAnalyzer) SetShowAnnexedSize(v bool) {
+ a.ShowAnnexedSize = v
+}
+
+// SetTimeFilter does nothing
+func (a *MockedAnalyzer) SetTimeFilter(timeFilter TimeFilter) {}
+
+// SetArchiveBrowsing sets EnableArchiveBrowsing
+func (a *MockedAnalyzer) SetArchiveBrowsing(v bool) {
+ a.ArchiveBrowsing = v
+}
--- /dev/null
+package testanalyze
+
+import (
+ "errors"
+ "time"
+
+ "github.com/dundee/gdu/v5/internal/common"
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/dundee/gdu/v5/pkg/remove"
+)
+
+// MockedAnalyzer returns dir with files with different size exponents
+type MockedAnalyzer struct{}
+
+// AnalyzeDir returns dir with files with different size exponents
+func (a *MockedAnalyzer) AnalyzeDir(
+ path string, ignore common.ShouldDirBeIgnored, fileTypeFilter common.ShouldFileBeIgnored,
+) fs.Item {
+ dir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "test_dir",
+ Usage: 1e12 + 1,
+ Size: 1e12 + 2,
+ Mtime: time.Date(2021, 8, 27, 22, 23, 24, 0, time.UTC),
+ },
+ BasePath: ".",
+ ItemCount: 12,
+ }
+ dir2 := &analyze.Dir{
+ File: &analyze.File{
+ Name: "aaa",
+ Usage: 1e12 + 1,
+ Size: 1e12 + 2,
+ Mtime: time.Date(2021, 8, 27, 22, 23, 27, 0, time.UTC),
+ Parent: dir,
+ },
+ }
+ dir3 := &analyze.Dir{
+ File: &analyze.File{
+ Name: "bbb",
+ Usage: 1e9 + 1,
+ Size: 1e9 + 2,
+ Mtime: time.Date(2021, 8, 27, 22, 23, 26, 0, time.UTC),
+ Parent: dir,
+ },
+ }
+ dir4 := &analyze.Dir{
+ File: &analyze.File{
+ Name: "ccc",
+ Usage: 1e6 + 1,
+ Size: 1e6 + 2,
+ Mtime: time.Date(2021, 8, 27, 22, 23, 25, 0, time.UTC),
+ Parent: dir,
+ },
+ }
+ file := &analyze.File{
+ Name: "ddd",
+ Usage: 1e3 + 1,
+ Size: 1e3 + 2,
+ Mtime: time.Date(2021, 8, 27, 22, 23, 24, 0, time.UTC),
+ Parent: dir,
+ }
+ dir.Files = fs.Files{dir2, dir3, dir4, file}
+
+ return dir
+}
+
+// GetProgressChan returns always Done
+func (a *MockedAnalyzer) GetProgressChan() chan common.CurrentProgress {
+ return make(chan common.CurrentProgress)
+}
+
+// GetDone returns always Done
+func (a *MockedAnalyzer) GetDone() common.SignalGroup {
+ c := make(common.SignalGroup)
+ defer c.Broadcast()
+ return c
+}
+
+// ResetProgress does nothing
+func (a *MockedAnalyzer) ResetProgress() {}
+
+// SetFollowSymlinks does nothing
+func (a *MockedAnalyzer) SetFollowSymlinks(v bool) {}
+
+// SetShowAnnexedSize does nothing
+func (a *MockedAnalyzer) SetShowAnnexedSize(v bool) {}
+
+// SetTimeFilter does nothing
+func (a *MockedAnalyzer) SetTimeFilter(timeFilter common.TimeFilter) {}
+
+// SetArchiveBrowsing does nothing
+func (a *MockedAnalyzer) SetArchiveBrowsing(v bool) {}
+
+// SetFileTypeFilter does nothing
+func (a *MockedAnalyzer) SetFileTypeFilter(fileTypeFilter common.ShouldFileBeIgnored) {}
+
+// ItemFromDirWithErr returns error
+func ItemFromDirWithErr(dir, file fs.Item) error {
+ return errors.New("Failed")
+}
+
+// ItemFromDirWithSleep returns error
+func ItemFromDirWithSleep(dir, file fs.Item) error {
+ time.Sleep(time.Millisecond * 600)
+ return remove.ItemFromDir(dir, file)
+}
+
+// ItemFromDirWithSleepAndErr returns error
+func ItemFromDirWithSleepAndErr(dir, file fs.Item) error {
+ time.Sleep(time.Millisecond * 600)
+ return errors.New("Failed")
+}
--- /dev/null
+package testapp
+
+import (
+ "errors"
+ "sync"
+
+ "github.com/dundee/gdu/v5/internal/common"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+// CreateSimScreen returns tcell.SimulationScreen
+func CreateSimScreen() tcell.SimulationScreen {
+ screen := tcell.NewSimulationScreen("UTF-8")
+ return screen
+}
+
+// CreateTestAppWithSimScreen returns app with simulation screen for tests
+func CreateTestAppWithSimScreen(width, height int) (app *tview.Application, screen tcell.SimulationScreen) {
+ app = tview.NewApplication()
+ screen = CreateSimScreen()
+ app.SetScreen(screen)
+ screen.SetSize(width, height)
+ return app, screen
+}
+
+// MockedApp is tview.Application with mocked methods
+type MockedApp struct {
+ mutex *sync.Mutex
+ updateDraws []func()
+ BeforeDraws []func(screen tcell.Screen) bool
+ FailRun bool
+}
+
+// CreateMockedApp returns app with simulation screen for tests
+func CreateMockedApp(failRun bool) common.TermApplication {
+ app := &MockedApp{
+ FailRun: failRun,
+ updateDraws: make([]func(), 0, 1),
+ BeforeDraws: make([]func(screen tcell.Screen) bool, 0, 1),
+ mutex: &sync.Mutex{},
+ }
+ return app
+}
+
+// Run does nothing
+func (app *MockedApp) Run() error {
+ if app.FailRun {
+ return errors.New("Fail")
+ }
+
+ return nil
+}
+
+// Stop does nothing
+func (app *MockedApp) Stop() {}
+
+// Suspend runs given function
+func (app *MockedApp) Suspend(f func()) bool {
+ f()
+ return true
+}
+
+// SetRoot does nothing
+func (app *MockedApp) SetRoot(root tview.Primitive, fullscreen bool) *tview.Application {
+ return nil
+}
+
+// SetFocus does nothing
+func (app *MockedApp) SetFocus(p tview.Primitive) *tview.Application {
+ return nil
+}
+
+// SetInputCapture does nothing
+func (app *MockedApp) SetInputCapture(capture func(event *tcell.EventKey) *tcell.EventKey) *tview.Application {
+ return nil
+}
+
+// SetMouseCapture does nothing
+func (app *MockedApp) SetMouseCapture(
+ capture func(event *tcell.EventMouse, action tview.MouseAction) (*tcell.EventMouse, tview.MouseAction),
+) *tview.Application {
+ return nil
+}
+
+// QueueUpdateDraw does nothing
+func (app *MockedApp) QueueUpdateDraw(f func()) *tview.Application {
+ app.mutex.Lock()
+ app.updateDraws = append(app.updateDraws, f)
+ app.mutex.Unlock()
+ return nil
+}
+
+// QueueUpdateDraw does nothing
+func (app *MockedApp) GetUpdateDraws() []func() {
+ app.mutex.Lock()
+ defer app.mutex.Unlock()
+ return app.updateDraws
+}
+
+// SetBeforeDrawFunc does nothing
+func (app *MockedApp) SetBeforeDrawFunc(f func(screen tcell.Screen) bool) *tview.Application {
+ app.BeforeDraws = append(app.BeforeDraws, f)
+ return nil
+}
--- /dev/null
+[1,2,{"progname":"gdu","progver":"development","timestamp":1626807263},
+[{"name":"/home/gdu"},
+[{"name":"app"},
+{"name":"app.go","asize":4638,"dsize":8192},
+{"name":"app_linux_test.go","asize":1410,"dsize":4096},
+{"name":"app_test.go","asize":4974,"dsize":8192}],
+{"name":"main.go","asize":3205,"dsize":4096}]]
--- /dev/null
+package testdev
+
+import "github.com/dundee/gdu/v5/pkg/device"
+
+// DevicesInfoGetterMock is mock of DevicesInfoGetter
+type DevicesInfoGetterMock struct {
+ Devices device.Devices
+}
+
+// GetDevicesInfo returns mocked devices
+func (t DevicesInfoGetterMock) GetDevicesInfo() (devices device.Devices, err error) {
+ return t.Devices, nil
+}
+
+// GetMounts returns all mounted filesystems from /proc/mounts
+func (t DevicesInfoGetterMock) GetMounts() (devices device.Devices, err error) {
+ return t.Devices, nil
+}
--- /dev/null
+package testdir
+
+import (
+ "io/fs"
+ "os"
+)
+
+// CreateTestDir creates test dir structure
+func CreateTestDir() func() {
+ if err := os.MkdirAll("test_dir/nested/subnested", os.ModePerm); err != nil {
+ panic(err)
+ }
+ if err := os.WriteFile("test_dir/nested/subnested/file", []byte("hello"), 0o600); err != nil {
+ panic(err)
+ }
+ if err := os.WriteFile("test_dir/nested/file2", []byte("go"), 0o600); err != nil {
+ panic(err)
+ }
+ return func() {
+ err := os.RemoveAll("test_dir")
+ if err != nil {
+ panic(err)
+ }
+ }
+}
+
+// MockedPathChecker is mocked os.Stat, returns (nil, nil)
+func MockedPathChecker(path string) (info fs.FileInfo, err error) {
+ return nil, nil
+}
--- /dev/null
+//go:build linux || openbsd
+
+package analyze
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+const devBSize = 512
+
+func setPlatformSpecificAttrs(file *File, f os.FileInfo) {
+ if stat, ok := f.Sys().(*syscall.Stat_t); ok {
+ file.Usage = stat.Blocks * devBSize
+ file.Mtime = time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec))
+
+ if stat.Nlink > 1 {
+ file.Mli = stat.Ino
+ }
+ }
+}
+
+func setDirPlatformSpecificAttrs(dir *Dir, path string) {
+ var stat syscall.Stat_t
+ if err := syscall.Stat(path, &stat); err != nil {
+ return
+ }
+
+ dir.Mtime = time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec))
+}
+
+// getSyscallStats extracts usage and inode info from os.FileInfo using syscall
+func getSyscallStats(info os.FileInfo) (usage int64, mli uint64) {
+ if stat, ok := info.Sys().(*syscall.Stat_t); ok {
+ usage = stat.Blocks * 512 // 512-byte blocks
+ if stat.Nlink > 1 {
+ mli = stat.Ino
+ }
+ } else {
+ usage = info.Size()
+ }
+ return
+}
--- /dev/null
+//go:build linux
+
+package analyze
+
+import (
+ "os"
+ "testing"
+
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestErr(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ err := os.Chmod("test_dir/nested", 0)
+ assert.Nil(t, err)
+ defer func() {
+ err = os.Chmod("test_dir/nested", 0o755)
+ assert.Nil(t, err)
+ }()
+
+ analyzer := CreateAnalyzer()
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*Dir)
+ analyzer.GetDone().Wait()
+ dir.UpdateStats(make(fs.HardLinkedItems))
+
+ assert.Equal(t, "test_dir", dir.GetName())
+ assert.Equal(t, int64(2), dir.ItemCount)
+ assert.Equal(t, '.', dir.GetFlag())
+
+ assert.Equal(t, "nested", dir.Files[0].GetName())
+ assert.Equal(t, '!', dir.Files[0].GetFlag())
+}
+
+func TestSeqErr(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ err := os.Chmod("test_dir/nested", 0)
+ assert.Nil(t, err)
+ defer func() {
+ err = os.Chmod("test_dir/nested", 0o755)
+ assert.Nil(t, err)
+ }()
+
+ analyzer := CreateSeqAnalyzer()
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*Dir)
+ analyzer.GetDone().Wait()
+ dir.UpdateStats(make(fs.HardLinkedItems))
+
+ assert.Equal(t, "test_dir", dir.GetName())
+ assert.Equal(t, int64(2), dir.ItemCount)
+ assert.Equal(t, '.', dir.GetFlag())
+
+ assert.Equal(t, "nested", dir.Files[0].GetName())
+ assert.Equal(t, '!', dir.Files[0].GetFlag())
+}
--- /dev/null
+//go:build windows || plan9
+
+package analyze
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+func setPlatformSpecificAttrs(file *File, f os.FileInfo) {
+ stat := f.Sys().(*syscall.Win32FileAttributeData)
+ file.Mtime = time.Unix(0, stat.LastWriteTime.Nanoseconds())
+ file.Usage = f.Size() // No block info on Windows, use apparent size
+}
+
+func setDirPlatformSpecificAttrs(dir *Dir, path string) {
+ stat, err := os.Stat(path)
+ if err != nil {
+ return
+ }
+ dir.Mtime = stat.ModTime()
+}
+
+// getSyscallStats extracts usage and inode info from os.FileInfo using syscall
+func getSyscallStats(info os.FileInfo) (usage int64, mli uint64) {
+ usage = info.Size()
+ return
+}
--- /dev/null
+package analyze
+
+import (
+ "os"
+ "sort"
+ "testing"
+
+ log "github.com/sirupsen/logrus"
+
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/stretchr/testify/assert"
+)
+
+func init() {
+ log.SetLevel(log.WarnLevel)
+}
+
+func TestAnalyzeDir(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ analyzer := CreateAnalyzer()
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*Dir)
+
+ progress := <-analyzer.GetProgressChan()
+ assert.GreaterOrEqual(t, progress.TotalSize, int64(0))
+
+ analyzer.GetDone().Wait()
+ analyzer.ResetProgress()
+ dir.UpdateStats(make(fs.HardLinkedItems))
+
+ // test dir info
+ assert.Equal(t, "test_dir", dir.Name)
+ assert.Equal(t, int64(7+4096*3), dir.Size)
+ assert.Equal(t, int64(5), dir.ItemCount)
+ assert.True(t, dir.IsDir())
+
+ // test dir tree
+ assert.Equal(t, "nested", dir.Files[0].GetName())
+ assert.Equal(t, "subnested", dir.Files[0].(*Dir).Files[1].GetName())
+
+ // test file
+ assert.Equal(t, "file2", dir.Files[0].(*Dir).Files[0].GetName())
+ assert.Equal(t, int64(2), dir.Files[0].(*Dir).Files[0].GetSize())
+
+ assert.Equal(
+ t, "file", dir.Files[0].(*Dir).Files[1].(*Dir).Files[0].GetName(),
+ )
+ assert.Equal(
+ t, int64(5), dir.Files[0].(*Dir).Files[1].(*Dir).Files[0].GetSize(),
+ )
+
+ // test parent link
+ assert.Equal(
+ t,
+ "test_dir",
+ dir.Files[0].(*Dir).
+ Files[1].(*Dir).
+ Files[0].
+ GetParent().
+ GetParent().
+ GetParent().
+ GetName(),
+ )
+}
+
+func TestIgnoreDir(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ dir := CreateAnalyzer().AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return true }, func(_ string) bool { return false },
+ ).(*Dir)
+
+ assert.Equal(t, "test_dir", dir.Name)
+ assert.Equal(t, int64(1), dir.ItemCount)
+}
+
+func TestFlags(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ err := os.Mkdir("test_dir/empty", 0o644)
+ assert.Nil(t, err)
+
+ err = os.Symlink("test_dir/nested/file2", "test_dir/nested/file3")
+ assert.Nil(t, err)
+
+ analyzer := CreateAnalyzer()
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*Dir)
+ analyzer.GetDone().Wait()
+ dir.UpdateStats(make(fs.HardLinkedItems))
+
+ sort.Sort(sort.Reverse(dir.Files))
+
+ assert.Equal(t, int64(28+4096*4), dir.Size)
+ assert.Equal(t, int64(7), dir.ItemCount)
+
+ // test file3
+ assert.Equal(t, "nested", dir.Files[0].GetName())
+ assert.Equal(t, "file3", dir.Files[0].(*Dir).Files[1].GetName())
+ assert.Equal(t, int64(21), dir.Files[0].(*Dir).Files[1].GetSize())
+ assert.Equal(t, '@', dir.Files[0].(*Dir).Files[1].GetFlag())
+
+ assert.Equal(t, 'e', dir.Files[1].GetFlag())
+}
+
+func TestHardlink(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ err := os.Link("test_dir/nested/file2", "test_dir/nested/file3")
+ assert.Nil(t, err)
+
+ analyzer := CreateAnalyzer()
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*Dir)
+ analyzer.GetDone().Wait()
+ dir.UpdateStats(make(fs.HardLinkedItems))
+
+ assert.Equal(t, int64(7+4096*3), dir.Size) // file2 and file3 are counted just once for size
+ assert.Equal(t, int64(6), dir.ItemCount) // but twice for item count
+
+ // test file3
+ assert.Equal(t, "file3", dir.Files[0].(*Dir).Files[1].GetName())
+ assert.Equal(t, int64(2), dir.Files[0].(*Dir).Files[1].GetSize())
+ assert.Equal(t, 'H', dir.Files[0].(*Dir).Files[1].GetFlag())
+}
+
+func TestFollowSymlink(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ err := os.Mkdir("test_dir/empty", 0o644)
+ assert.Nil(t, err)
+
+ err = os.Symlink("./file2", "test_dir/nested/file3")
+ assert.Nil(t, err)
+
+ analyzer := CreateAnalyzer()
+ analyzer.SetFollowSymlinks(true)
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*Dir)
+ analyzer.GetDone().Wait()
+ dir.UpdateStats(make(fs.HardLinkedItems))
+
+ sort.Sort(sort.Reverse(dir.Files))
+
+ assert.Equal(t, int64(9+4096*4), dir.Size)
+ assert.Equal(t, int64(7), dir.ItemCount)
+
+ // test file3
+ assert.Equal(t, "nested", dir.Files[0].GetName())
+ assert.Equal(t, "file3", dir.Files[0].(*Dir).Files[1].GetName())
+ assert.Equal(t, int64(2), dir.Files[0].(*Dir).Files[1].GetSize())
+ assert.Equal(t, ' ', dir.Files[0].(*Dir).Files[1].GetFlag())
+
+ assert.Equal(t, 'e', dir.Files[1].GetFlag())
+}
+
+func TestGitAnnexSymlink(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ err := os.Mkdir("test_dir/empty", 0o644)
+ assert.Nil(t, err)
+
+ err = os.Symlink(
+ ".git/annex/objects/qx/qX/SHA256E-s967858083--"+
+ "3e54803fded8dc3a9ea68b106f7b51e04e33c79b4a7b32a860f0b22d89af5c65.mp4/SHA256E-s967858083--"+
+ "3e54803fded8dc3a9ea68b106f7b51e04e33c79b4a7b32a860f0b22d89af5c65.mp4",
+ "test_dir/nested/file3")
+ assert.Nil(t, err)
+
+ analyzer := CreateAnalyzer()
+ analyzer.SetFollowSymlinks(true)
+ analyzer.SetShowAnnexedSize(true)
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*Dir)
+ analyzer.GetDone().Wait()
+ dir.UpdateStats(make(fs.HardLinkedItems))
+
+ sort.Sort(sort.Reverse(dir.Files))
+
+ assert.Equal(t, int64(967858083+7+4096*4), dir.Size)
+ assert.Equal(t, int64(7), dir.ItemCount)
+
+ // test file3
+ assert.Equal(t, "nested", dir.Files[0].GetName())
+ assert.Equal(t, "file3", dir.Files[0].(*Dir).Files[1].GetName())
+ assert.Equal(t, int64(967858083), dir.Files[0].(*Dir).Files[1].GetSize())
+ assert.Equal(t, '@', dir.Files[0].(*Dir).Files[1].GetFlag())
+
+ assert.Equal(t, 'e', dir.Files[1].GetFlag())
+}
+
+func TestBrokenSymlinkSkipped(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ err := os.Mkdir("test_dir/empty", 0o644)
+ assert.Nil(t, err)
+
+ err = os.Symlink("xxx", "test_dir/nested/file3")
+ assert.Nil(t, err)
+
+ analyzer := CreateAnalyzer()
+ analyzer.SetFollowSymlinks(true)
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*Dir)
+ analyzer.GetDone().Wait()
+ dir.UpdateStats(make(fs.HardLinkedItems))
+
+ sort.Sort(sort.Reverse(dir.Files))
+
+ assert.Equal(t, int64(7+4096*4), dir.Size)
+ assert.Equal(t, int64(6), dir.ItemCount)
+
+ assert.Equal(t, '!', dir.Files[0].GetFlag())
+}
+
+func BenchmarkAnalyzeDir(b *testing.B) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ b.ResetTimer()
+
+ analyzer := CreateAnalyzer()
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ )
+ analyzer.GetDone().Wait()
+ dir.UpdateStats(make(fs.HardLinkedItems))
+}
+
+func TestParallelStableOrderAnalyzerDeterminism(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ // Run parallel analyzer multiple times and verify results are identical
+ var results [][]string
+ for i := 0; i < 5; i++ {
+ analyzer := CreateStableOrderAnalyzer()
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ )
+ analyzer.GetDone().Wait()
+ dir.UpdateStats(make(fs.HardLinkedItems))
+
+ names := getFileNames(dir)
+ results = append(results, names)
+ }
+
+ // All runs should produce identical results
+ for i := 1; i < len(results); i++ {
+ assert.Equal(t, results[0], results[i],
+ "Parallel analyzer run %d produced different results than run 0", i)
+ }
+}
+
+func TestParallelVsSequentialConsistency(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ // Run sequential analyzer
+ seqAnalyzer := CreateSeqAnalyzer()
+ seqDir := seqAnalyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ )
+ seqAnalyzer.GetDone().Wait()
+ seqDir.UpdateStats(make(fs.HardLinkedItems))
+ seqNames := getFileNames(seqDir)
+
+ // Run parallel analyzer
+ parAnalyzer := CreateStableOrderAnalyzer()
+ parDir := parAnalyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ )
+ parAnalyzer.GetDone().Wait()
+ parDir.UpdateStats(make(fs.HardLinkedItems))
+ parNames := getFileNames(parDir)
+
+ // Results should match
+ assert.Equal(t, seqNames, parNames,
+ "Parallel and sequential analyzers produced different results")
+}
+
+func TestFileDirectoryInterleaving(t *testing.T) {
+ // Create test directory with interleaved files and directories
+ err := os.MkdirAll("test_interleave/aaa_dir", 0755)
+ assert.NoError(t, err)
+ err = os.WriteFile("test_interleave/bbb_file", []byte("content"), 0644)
+ assert.NoError(t, err)
+ err = os.MkdirAll("test_interleave/ccc_dir", 0755)
+ assert.NoError(t, err)
+ err = os.WriteFile("test_interleave/ddd_file", []byte("content"), 0644)
+ assert.NoError(t, err)
+ defer os.RemoveAll("test_interleave")
+
+ // Run sequential analyzer
+ seqAnalyzer := CreateSeqAnalyzer()
+ seqDir := seqAnalyzer.AnalyzeDir(
+ "test_interleave", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*Dir)
+ seqAnalyzer.GetDone().Wait()
+
+ // Run parallel analyzer
+ parAnalyzer := CreateStableOrderAnalyzer()
+ parDir := parAnalyzer.AnalyzeDir(
+ "test_interleave", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*Dir)
+ parAnalyzer.GetDone().Wait()
+
+ // Extract file/dir names in order
+ seqOrder := make([]string, len(seqDir.Files))
+ for i, item := range seqDir.Files {
+ seqOrder[i] = item.GetName()
+ }
+
+ parOrder := make([]string, len(parDir.Files))
+ for i, item := range parDir.Files {
+ parOrder[i] = item.GetName()
+ }
+
+ // The order must be identical: [aaa_dir, bbb_file, ccc_dir, ddd_file]
+ assert.Equal(t, seqOrder, parOrder,
+ "Parallel analyzer did not preserve file/directory interleaving")
+
+ // Verify the expected order (alphabetical from os.ReadDir)
+ assert.Equal(t, "aaa_dir", seqOrder[0])
+ assert.Equal(t, "bbb_file", seqOrder[1])
+ assert.Equal(t, "ccc_dir", seqOrder[2])
+ assert.Equal(t, "ddd_file", seqOrder[3])
+}
+
+// getFileNames recursively collects file names from a directory tree
+func getFileNames(item fs.Item) []string {
+ names := []string{item.GetName()}
+ if item.IsDir() {
+ for child := range item.GetFiles(fs.SortByName, fs.SortAsc) {
+ names = append(names, getFileNames(child)...)
+ }
+ }
+ return names
+}
--- /dev/null
+//go:build darwin || netbsd || freebsd
+
+package analyze
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+const devBSize = 512
+
+func setPlatformSpecificAttrs(file *File, f os.FileInfo) {
+ if stat, ok := f.Sys().(*syscall.Stat_t); ok {
+ file.Usage = stat.Blocks * devBSize
+ file.Mtime = time.Unix(int64(stat.Mtimespec.Sec), int64(stat.Mtimespec.Nsec))
+
+ if stat.Nlink > 1 {
+ file.Mli = stat.Ino
+ }
+ }
+}
+
+func setDirPlatformSpecificAttrs(dir *Dir, path string) {
+ var stat syscall.Stat_t
+ if err := syscall.Stat(path, &stat); err != nil {
+ return
+ }
+
+ dir.Mtime = time.Unix(int64(stat.Mtimespec.Sec), int64(stat.Mtimespec.Nsec))
+}
+
+// getSyscallStats extracts usage and inode info from os.FileInfo using syscall
+func getSyscallStats(info os.FileInfo) (usage int64, mli uint64) {
+ if stat, ok := info.Sys().(*syscall.Stat_t); ok {
+ usage = stat.Blocks * 512 // 512-byte blocks
+ if stat.Nlink > 1 {
+ mli = stat.Ino
+ }
+ } else {
+ usage = info.Size()
+ }
+ return
+}
--- /dev/null
+package analyze
+
+import (
+ "encoding/json"
+ "io"
+ "strconv"
+)
+
+// EncodeJSON writes JSON representation of dir
+func (f *Dir) EncodeJSON(writer io.Writer, topLevel bool) error {
+ buff := make([]byte, 0, 20)
+
+ buff = append(buff, []byte(`[{"name":`)...)
+
+ if topLevel {
+ if err := addString(&buff, f.GetPath()); err != nil {
+ return err
+ }
+ } else {
+ if err := addString(&buff, f.GetName()); err != nil {
+ return err
+ }
+ }
+
+ if !f.GetMtime().IsZero() {
+ buff = append(buff, []byte(`,"mtime":`)...)
+ buff = append(buff, []byte(strconv.FormatInt(f.GetMtime().Unix(), 10))...)
+ }
+
+ buff = append(buff, '}')
+ if f.Files.Len() > 0 {
+ buff = append(buff, ',')
+ }
+ buff = append(buff, '\n')
+
+ if _, err := writer.Write(buff); err != nil {
+ return err
+ }
+
+ for i, item := range f.Files {
+ if i > 0 {
+ if _, err := writer.Write([]byte(",\n")); err != nil {
+ return err
+ }
+ }
+ err := item.EncodeJSON(writer, false)
+ if err != nil {
+ return err
+ }
+ }
+
+ if _, err := writer.Write([]byte("]")); err != nil {
+ return err
+ }
+ return nil
+}
+
+// EncodeJSON writes JSON representation of file
+func (f *File) EncodeJSON(writer io.Writer, topLevel bool) error {
+ buff := make([]byte, 0, 20)
+
+ buff = append(buff, []byte(`{"name":`)...)
+ if err := addString(&buff, f.GetName()); err != nil {
+ return err
+ }
+ if f.GetSize() > 0 {
+ buff = append(buff, []byte(`,"asize":`)...)
+ buff = append(buff, []byte(strconv.FormatInt(f.GetSize(), 10))...)
+ }
+ if f.GetUsage() > 0 {
+ buff = append(buff, []byte(`,"dsize":`)...)
+ buff = append(buff, []byte(strconv.FormatInt(f.GetUsage(), 10))...)
+ }
+ if !f.GetMtime().IsZero() {
+ buff = append(buff, []byte(`,"mtime":`)...)
+ buff = append(buff, []byte(strconv.FormatInt(f.GetMtime().Unix(), 10))...)
+ }
+
+ if f.Flag == '@' {
+ buff = append(buff, []byte(`,"notreg":true`)...)
+ }
+ if f.Flag == 'H' {
+ buff = append(buff, []byte(`,"ino":`+strconv.FormatUint(f.Mli, 10)+`,"hlnkc":true`)...)
+ }
+
+ buff = append(buff, '}')
+
+ if _, err := writer.Write(buff); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addString(buff *[]byte, val string) error {
+ b, err := json.Marshal(val)
+ if err != nil {
+ return err
+ }
+ *buff = append(*buff, b...)
+ return err
+}
--- /dev/null
+package analyze
+
+import (
+ "bytes"
+ "testing"
+ "time"
+
+ "github.com/dundee/gdu/v5/pkg/fs"
+ log "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+)
+
+func init() {
+ log.SetLevel(log.WarnLevel)
+}
+
+func TestEncode(t *testing.T) {
+ dir := &Dir{
+ File: &File{
+ Name: "test_dir",
+ Size: 10,
+ Usage: 18,
+ Mtime: time.Date(2021, 8, 19, 0, 40, 0, 0, time.UTC),
+ },
+ ItemCount: 4,
+ BasePath: ".",
+ }
+
+ subdir := &Dir{
+ File: &File{
+ Name: "nested",
+ Size: 9,
+ Usage: 14,
+ Parent: dir,
+ },
+ ItemCount: 3,
+ }
+ file := &File{
+ Name: "file2",
+ Size: 3,
+ Usage: 4,
+ Parent: subdir,
+ }
+ file2 := &File{
+ Name: "file",
+ Size: 5,
+ Usage: 6,
+ Parent: subdir,
+ Flag: '@',
+ Mtime: time.Date(2021, 8, 19, 0, 40, 0, 0, time.UTC),
+ }
+ file3 := &File{
+ Name: "file3",
+ Mli: 1234,
+ Flag: 'H',
+ }
+ dir.Files = fs.Files{subdir}
+ subdir.Files = fs.Files{file, file2, file3}
+
+ var buff bytes.Buffer
+ err := dir.EncodeJSON(&buff, true)
+
+ assert.Nil(t, err)
+ assert.Contains(t, buff.String(), `"name":"nested"`)
+ assert.Contains(t, buff.String(), `"mtime":1629333600`)
+ assert.Contains(t, buff.String(), `"ino":1234`)
+ assert.Contains(t, buff.String(), `"hlnkc":true`)
+}
--- /dev/null
+package analyze
+
+import (
+ "iter"
+ "path/filepath"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/dundee/gdu/v5/pkg/fs"
+)
+
+// File struct
+type File struct {
+ Mtime time.Time
+ Parent fs.Item
+ Name string
+ Size int64
+ Usage int64
+ Mli uint64
+ Flag rune
+}
+
+// GetName returns name of dir
+func (f *File) GetName() string {
+ return f.Name
+}
+
+// IsDir returns false for file
+func (f *File) IsDir() bool {
+ return false
+}
+
+// GetParent returns parent dir
+func (f *File) GetParent() fs.Item {
+ return f.Parent
+}
+
+// SetParent sets parent dir
+func (f *File) SetParent(parent fs.Item) {
+ f.Parent = parent
+}
+
+// GetPath returns absolute Get of the file
+func (f *File) GetPath() string {
+ return filepath.Join(f.Parent.GetPath(), f.Name)
+}
+
+// GetFlag returns flag of the file
+func (f *File) GetFlag() rune {
+ return f.Flag
+}
+
+// GetSize returns size of the file
+func (f *File) GetSize() int64 {
+ return f.Size
+}
+
+// GetUsage returns usage of the file
+func (f *File) GetUsage() int64 {
+ return f.Usage
+}
+
+// GetMtime returns mtime of the file
+func (f *File) GetMtime() time.Time {
+ return f.Mtime
+}
+
+// GetType returns name type of item
+func (f *File) GetType() string {
+ if f.Flag == '@' {
+ return "Other"
+ }
+ return "File"
+}
+
+// GetItemCount returns 1 for file
+func (f *File) GetItemCount() int64 {
+ return 1
+}
+
+// GetMultiLinkedInode returns inode number of multilinked file
+func (f *File) GetMultiLinkedInode() uint64 {
+ return f.Mli
+}
+
+func (f *File) alreadyCounted(linkedItems fs.HardLinkedItems) bool {
+ mli := f.Mli
+ counted := false
+ if mli > 0 {
+ f.Flag = 'H'
+ if _, ok := linkedItems[mli]; ok {
+ counted = true
+ }
+ linkedItems[mli] = append(linkedItems[mli], f)
+ }
+ return counted
+}
+
+// GetItemStats returns 1 as count of items, apparent usage and real usage of this file
+func (f *File) GetItemStats(linkedItems fs.HardLinkedItems) (itemCount, size, usage int64) {
+ if f.alreadyCounted(linkedItems) {
+ return 1, 0, 0
+ }
+ return 1, f.GetSize(), f.GetUsage()
+}
+
+// UpdateStats does nothing on file
+func (f *File) UpdateStats(linkedItems fs.HardLinkedItems) {}
+
+// GetFiles returns all files in directory
+func (f *File) GetFiles(sortBy fs.SortBy, order fs.SortOrder) iter.Seq[fs.Item] {
+ return func(yield func(fs.Item) bool) {}
+}
+
+// GetFilesLocked returns all files in directory
+func (f *File) GetFilesLocked(sortBy fs.SortBy, order fs.SortOrder) iter.Seq[fs.Item] {
+ return f.GetFiles(sortBy, order)
+}
+
+// RLock panics on file
+func (f *File) RLock() func() {
+ panic("RLock should not be called on file")
+}
+
+// AddFile panics on file
+func (f *File) AddFile(item fs.Item) {
+ panic("AddFile should not be called on file")
+}
+
+// RemoveFile panics on file
+func (f *File) RemoveFile(item fs.Item) {
+ panic("RemoveFile should not be called on file")
+}
+
+// RemoveFileByName panics on file
+func (f *File) RemoveFileByName(name string) {
+ panic("RemoveFileByName should not be called on file")
+}
+
+// Dir struct
+type Dir struct {
+ *File
+ BasePath string
+ Files fs.Files
+ ItemCount int64
+ m sync.RWMutex
+}
+
+// AddFile add item to files
+func (f *Dir) AddFile(item fs.Item) {
+ f.Files = append(f.Files, item)
+}
+
+// GetFiles returns all files in directory as a sorted iterator
+func (f *Dir) GetFiles(sortBy fs.SortBy, order fs.SortOrder) iter.Seq[fs.Item] {
+ return func(yield func(fs.Item) bool) {
+ // Make a copy to avoid modifying the original slice
+ sorted := make(fs.Files, len(f.Files))
+ copy(sorted, f.Files)
+ sortFiles(sorted, sortBy, order)
+
+ for _, item := range sorted {
+ if !yield(item) {
+ return
+ }
+ }
+ }
+}
+
+// GetFilesLocked returns all files in directory as a sorted iterator
+// It is safe to call this function from multiple goroutines
+func (f *Dir) GetFilesLocked(sortBy fs.SortBy, order fs.SortOrder) iter.Seq[fs.Item] {
+ return func(yield func(fs.Item) bool) {
+ f.m.RLock()
+ defer f.m.RUnlock()
+
+ // Make a copy to avoid modifying the original slice
+ sorted := make(fs.Files, len(f.Files))
+ copy(sorted, f.Files)
+ sortFiles(sorted, sortBy, order)
+
+ for _, item := range sorted {
+ if !yield(item) {
+ return
+ }
+ }
+ }
+}
+
+// GetType returns name type of item
+func (f *Dir) GetType() string {
+ return "Directory"
+}
+
+// GetItemCount returns number of files in dir
+func (f *Dir) GetItemCount() int64 {
+ f.m.RLock()
+ defer f.m.RUnlock()
+ return f.ItemCount
+}
+
+// IsDir returns true for dir
+func (f *Dir) IsDir() bool {
+ return true
+}
+
+// GetPath returns absolute path of the file
+func (f *Dir) GetPath() string {
+ if f.BasePath != "" {
+ return filepath.Join(f.BasePath, f.Name)
+ }
+ if f.Parent != nil {
+ return filepath.Join(f.Parent.GetPath(), f.Name)
+ }
+ return f.Name
+}
+
+// GetItemStats returns item count, apparent usage and real usage of this dir
+func (f *Dir) GetItemStats(linkedItems fs.HardLinkedItems) (itemCount, size, usage int64) {
+ f.UpdateStats(linkedItems)
+ return f.ItemCount, f.GetSize(), f.GetUsage()
+}
+
+// UpdateStats recursively updates size and item count
+func (f *Dir) UpdateStats(linkedItems fs.HardLinkedItems) {
+ totalSize := int64(4096)
+ totalUsage := int64(4096)
+ var itemCount int64
+ for _, entry := range f.Files {
+ count, size, usage := entry.GetItemStats(linkedItems)
+ totalSize += size
+ totalUsage += usage
+ itemCount += count
+
+ if entry.GetMtime().After(f.Mtime) {
+ f.Mtime = entry.GetMtime()
+ }
+
+ switch entry.GetFlag() {
+ case '!', '.':
+ if f.Flag != '!' {
+ f.Flag = '.'
+ }
+ }
+ }
+ f.ItemCount = itemCount + 1
+ f.Size = totalSize
+ f.Usage = totalUsage
+}
+
+// RemoveFile removes item from dir, updates size and item count
+func (f *Dir) RemoveFile(item fs.Item) {
+ f.m.Lock()
+ defer f.m.Unlock()
+
+ f.Files = f.Files.Remove(item)
+
+ cur := f
+ for {
+ cur.ItemCount -= item.GetItemCount()
+ cur.Size -= item.GetSize()
+ cur.Usage -= item.GetUsage()
+
+ if cur.Parent == nil {
+ break
+ }
+ cur = cur.Parent.(*Dir)
+ }
+}
+
+// sortFiles sorts files in place according to sortBy and order
+func sortFiles(files fs.Files, sortBy fs.SortBy, order fs.SortOrder) {
+ var sorter sort.Interface
+ switch sortBy {
+ case fs.SortByName:
+ sorter = fs.ByName(files)
+ case fs.SortByItemCount:
+ sorter = fs.ByItemCount(files)
+ case fs.SortByMtime:
+ sorter = fs.ByMtime(files)
+ case fs.SortByApparentSize:
+ sorter = fs.ByApparentSize(files)
+ case fs.SortBySize:
+ sorter = files
+ }
+
+ if order == fs.SortDesc {
+ sort.Sort(sort.Reverse(sorter))
+ } else {
+ sort.Sort(sorter)
+ }
+}
+
+// RLock read locks dir
+func (f *Dir) RLock() func() {
+ f.m.RLock()
+ return f.m.RUnlock
+}
+
+// RemoveFileByName removes item by name from dir
+func (f *Dir) RemoveFileByName(name string) {
+ f.m.Lock()
+ defer f.m.Unlock()
+
+ idx, ok := f.Files.FindByName(name)
+ if !ok {
+ return
+ }
+ item := f.Files[idx]
+ f.Files = append(f.Files[:idx], f.Files[idx+1:]...)
+
+ cur := f
+ for {
+ cur.ItemCount -= item.GetItemCount()
+ cur.Size -= item.GetSize()
+ cur.Usage -= item.GetUsage()
+
+ if cur.Parent == nil {
+ break
+ }
+ cur = cur.Parent.(*Dir)
+ }
+}
--- /dev/null
+package analyze
+
+import (
+ "slices"
+ "testing"
+ "time"
+
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestIsDir(t *testing.T) {
+ dir := Dir{
+ File: &File{
+ Name: "xxx",
+ Size: 5,
+ },
+ ItemCount: 2,
+ }
+ file := &File{
+ Name: "yyy",
+ Size: 2,
+ Parent: &dir,
+ }
+ dir.Files = fs.Files{file}
+
+ assert.True(t, dir.IsDir())
+ assert.False(t, file.IsDir())
+}
+
+func TestGetType(t *testing.T) {
+ dir := Dir{
+ File: &File{
+ Name: "xxx",
+ Size: 5,
+ },
+ ItemCount: 2,
+ }
+ file := &File{
+ Name: "yyy",
+ Size: 2,
+ Parent: &dir,
+ Flag: ' ',
+ }
+ file2 := &File{
+ Name: "yyy",
+ Size: 2,
+ Parent: &dir,
+ Flag: '@',
+ }
+ dir.Files = fs.Files{file, file2}
+
+ assert.Equal(t, "Directory", dir.GetType())
+ assert.Equal(t, "File", file.GetType())
+ assert.Equal(t, "Other", file2.GetType())
+}
+
+func TestFind(t *testing.T) {
+ dir := Dir{
+ File: &File{
+ Name: "xxx",
+ Size: 5,
+ },
+ ItemCount: 2,
+ }
+
+ file := &File{
+ Name: "yyy",
+ Size: 2,
+ Parent: &dir,
+ }
+ file2 := &File{
+ Name: "zzz",
+ Size: 3,
+ Parent: &dir,
+ }
+ dir.Files = fs.Files{file, file2}
+
+ i, _ := dir.Files.IndexOf(file)
+ assert.Equal(t, 0, i)
+ i, _ = dir.Files.IndexOf(file2)
+ assert.Equal(t, 1, i)
+}
+
+func TestRemove(t *testing.T) {
+ dir := Dir{
+ File: &File{
+ Name: "xxx",
+ Size: 5,
+ },
+ ItemCount: 2,
+ }
+
+ file := &File{
+ Name: "yyy",
+ Size: 2,
+ Parent: &dir,
+ }
+ file2 := &File{
+ Name: "zzz",
+ Size: 3,
+ Parent: &dir,
+ }
+ dir.Files = fs.Files{file, file2}
+
+ dir.Files = dir.Files.Remove(file)
+
+ assert.Equal(t, 1, len(dir.Files))
+ assert.Equal(t, file2, dir.Files[0])
+}
+
+func TestRemoveByName(t *testing.T) {
+ dir := Dir{
+ File: &File{
+ Name: "xxx",
+ Size: 5,
+ Usage: 8,
+ },
+ ItemCount: 2,
+ }
+
+ file := &File{
+ Name: "yyy",
+ Size: 2,
+ Usage: 4,
+ Parent: &dir,
+ }
+ file2 := &File{
+ Name: "zzz",
+ Size: 3,
+ Usage: 4,
+ Parent: &dir,
+ }
+ dir.Files = fs.Files{file, file2}
+
+ dir.Files = dir.Files.RemoveByName("yyy")
+
+ assert.Equal(t, 1, len(dir.Files))
+ assert.Equal(t, file2, dir.Files[0])
+}
+
+func TestRemoveNotInDir(t *testing.T) {
+ dir := Dir{
+ File: &File{
+ Name: "xxx",
+ Size: 5,
+ Usage: 8,
+ },
+ ItemCount: 2,
+ }
+
+ file := &File{
+ Name: "yyy",
+ Size: 2,
+ Usage: 4,
+ Parent: &dir,
+ }
+ file2 := &File{
+ Name: "zzz",
+ Size: 3,
+ Usage: 4,
+ }
+ dir.Files = fs.Files{file}
+
+ _, ok := dir.Files.IndexOf(file2)
+ assert.Equal(t, false, ok)
+
+ dir.Files = dir.Files.Remove(file2)
+
+ assert.Equal(t, 1, len(dir.Files))
+}
+
+func TestRemoveByNameNotInDir(t *testing.T) {
+ dir := Dir{
+ File: &File{
+ Name: "xxx",
+ Size: 5,
+ Usage: 8,
+ },
+ ItemCount: 2,
+ }
+
+ file := &File{
+ Name: "yyy",
+ Size: 2,
+ Usage: 4,
+ Parent: &dir,
+ }
+ file2 := &File{
+ Name: "zzz",
+ Size: 3,
+ Usage: 4,
+ }
+ dir.Files = fs.Files{file}
+
+ _, ok := dir.Files.IndexOf(file2)
+ assert.Equal(t, false, ok)
+
+ dir.Files = dir.Files.RemoveByName("zzz")
+
+ assert.Equal(t, 1, len(dir.Files))
+}
+
+func TestUpdateStats(t *testing.T) {
+ dir := Dir{
+ File: &File{
+ Name: "xxx",
+ Size: 1,
+ Mtime: time.Date(2021, 8, 19, 0, 40, 0, 0, time.UTC),
+ },
+ ItemCount: 1,
+ }
+
+ file := &File{
+ Name: "yyy",
+ Size: 2,
+ Mtime: time.Date(2021, 8, 19, 0, 41, 0, 0, time.UTC),
+ Parent: &dir,
+ }
+ file2 := &File{
+ Name: "zzz",
+ Size: 3,
+ Mtime: time.Date(2021, 8, 19, 0, 42, 0, 0, time.UTC),
+ Parent: &dir,
+ }
+ dir.Files = fs.Files{file, file2}
+
+ dir.UpdateStats(nil)
+
+ assert.Equal(t, int64(4096+5), dir.Size)
+ assert.Equal(t, 42, dir.GetMtime().Minute())
+}
+
+func TestGetMultiLinkedInode(t *testing.T) {
+ file := &File{
+ Name: "xxx",
+ Mli: 5,
+ }
+
+ assert.Equal(t, uint64(5), file.GetMultiLinkedInode())
+}
+
+func TestGetPathWithoutLeadingSlash(t *testing.T) {
+ dir := &Dir{
+ File: &File{
+ Name: "C:\\",
+ Size: 5,
+ Usage: 12,
+ },
+ ItemCount: 3,
+ BasePath: "",
+ }
+
+ assert.Equal(t, "C:\\", dir.GetPath())
+}
+
+func TestSetParent(t *testing.T) {
+ dir := &Dir{
+ File: &File{
+ Name: "root",
+ Size: 5,
+ Usage: 12,
+ },
+ ItemCount: 3,
+ BasePath: "/",
+ }
+ file := &File{
+ Name: "xxx",
+ Mli: 5,
+ }
+ file.SetParent(dir)
+
+ assert.Equal(t, "root", file.GetParent().GetName())
+}
+
+func TestGetFiles(t *testing.T) {
+ file := &File{
+ Name: "xxx",
+ Mli: 5,
+ }
+ dir := &Dir{
+ File: &File{
+ Name: "root",
+ Size: 5,
+ Usage: 12,
+ },
+ ItemCount: 3,
+ BasePath: "/",
+ Files: fs.Files{file},
+ }
+
+ dirFiles := slices.Collect(dir.GetFiles(fs.SortByName, fs.SortAsc))
+ assert.Equal(t, file.Name, dirFiles[0].GetName())
+ fileFiles := slices.Collect(file.GetFiles(fs.SortByName, fs.SortAsc))
+ assert.Equal(t, 0, len(fileFiles))
+}
+
+func TestGetFilesLocked(t *testing.T) {
+ file := &File{
+ Name: "xxx",
+ Mli: 5,
+ }
+ dir := &Dir{
+ File: &File{
+ Name: "root",
+ Size: 5,
+ Usage: 12,
+ },
+ ItemCount: 3,
+ BasePath: "/",
+ Files: fs.Files{file},
+ }
+
+ unlock := dir.RLock()
+ defer unlock()
+ files := slices.Collect(dir.GetFiles(fs.SortByName, fs.SortAsc))
+ locked := slices.Collect(dir.GetFilesLocked(fs.SortByName, fs.SortAsc))
+ assert.Equal(t, len(files), len(locked))
+ assert.Equal(t, files[0].GetName(), locked[0].GetName())
+}
+
+func TestAddFilePanicsOnFile(t *testing.T) {
+ file := &File{
+ Name: "xxx",
+ Mli: 5,
+ }
+ assert.Panics(t, func() {
+ file.AddFile(file)
+ })
+}
--- /dev/null
+package analyze
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+
+ "github.com/dundee/gdu/v5/internal/common"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ log "github.com/sirupsen/logrus"
+)
+
+var concurrencyLimit = make(chan struct{}, 3*runtime.GOMAXPROCS(0))
+
+// ParallelAnalyzer implements Analyzer
+type ParallelAnalyzer struct {
+ progress *common.CurrentProgress
+ progressChan chan common.CurrentProgress
+ progressOutChan chan common.CurrentProgress
+ progressDoneChan chan struct{}
+ doneChan common.SignalGroup
+ wait *WaitGroup
+ ignoreDir common.ShouldDirBeIgnored
+ ignoreFileType common.ShouldFileBeIgnored
+ followSymlinks bool
+ gitAnnexedSize bool
+ matchesTimeFilterFn common.TimeFilter
+ archiveBrowsing bool
+}
+
+// CreateAnalyzer returns Analyzer
+func CreateAnalyzer() *ParallelAnalyzer {
+ return &ParallelAnalyzer{
+ progress: &common.CurrentProgress{
+ ItemCount: 0,
+ TotalSize: int64(0),
+ },
+ progressChan: make(chan common.CurrentProgress, 1),
+ progressOutChan: make(chan common.CurrentProgress, 1),
+ progressDoneChan: make(chan struct{}),
+ doneChan: make(common.SignalGroup),
+ wait: (&WaitGroup{}).Init(),
+ }
+}
+
+// SetFollowSymlinks sets whether symlink to files should be followed
+func (a *ParallelAnalyzer) SetFollowSymlinks(v bool) {
+ a.followSymlinks = v
+}
+
+// SetShowAnnexedSize sets whether to use annexed size of git-annex files
+func (a *ParallelAnalyzer) SetShowAnnexedSize(v bool) {
+ a.gitAnnexedSize = v
+}
+
+// SetTimeFilter sets the time filter function for file inclusion
+func (a *ParallelAnalyzer) SetTimeFilter(matchesTimeFilterFn common.TimeFilter) {
+ a.matchesTimeFilterFn = matchesTimeFilterFn
+}
+
+// SetArchiveBrowsing sets whether browsing of zip/jar archives is enabled
+func (a *ParallelAnalyzer) SetArchiveBrowsing(v bool) {
+ a.archiveBrowsing = v
+}
+
+// SetFileTypeFilter sets the file type filter function
+func (a *ParallelAnalyzer) SetFileTypeFilter(filter common.ShouldFileBeIgnored) {
+ a.ignoreFileType = filter
+}
+
+// GetProgressChan returns channel for getting progress
+func (a *ParallelAnalyzer) GetProgressChan() chan common.CurrentProgress {
+ return a.progressOutChan
+}
+
+// GetDone returns channel for checking when analysis is done
+func (a *ParallelAnalyzer) GetDone() common.SignalGroup {
+ return a.doneChan
+}
+
+// ResetProgress returns progress
+func (a *ParallelAnalyzer) ResetProgress() {
+ a.progress = &common.CurrentProgress{}
+ a.progressChan = make(chan common.CurrentProgress, 1)
+ a.progressOutChan = make(chan common.CurrentProgress, 1)
+ a.progressDoneChan = make(chan struct{})
+ a.doneChan = make(common.SignalGroup)
+ a.wait = (&WaitGroup{}).Init()
+}
+
+// AnalyzeDir analyzes given path
+func (a *ParallelAnalyzer) AnalyzeDir(
+ path string, ignore common.ShouldDirBeIgnored, fileTypeFilter common.ShouldFileBeIgnored,
+) fs.Item {
+ a.ignoreDir = ignore
+ a.ignoreFileType = fileTypeFilter
+
+ go a.updateProgress()
+ dir := a.processDir(path)
+
+ dir.BasePath = filepath.Dir(path)
+ a.wait.Wait()
+
+ a.progressDoneChan <- struct{}{}
+ a.doneChan.Broadcast()
+
+ return dir
+}
+
+func (a *ParallelAnalyzer) processDir(path string) *Dir {
+ var (
+ file fs.Item
+ err error
+ totalSize int64
+ info os.FileInfo
+ subDirChan = make(chan *Dir)
+ dirCount int
+ )
+
+ a.wait.Add(1)
+
+ files, err := os.ReadDir(path)
+ if err != nil {
+ log.Print(err.Error())
+ }
+
+ dir := &Dir{
+ File: &File{
+ Name: filepath.Base(path),
+ Flag: getDirFlag(err, len(files)),
+ },
+ ItemCount: 1,
+ Files: make(fs.Files, 0, len(files)),
+ }
+ setDirPlatformSpecificAttrs(dir, path)
+
+ for _, f := range files {
+ name := f.Name()
+ entryPath := filepath.Join(path, name)
+ if f.IsDir() {
+ if a.ignoreDir(name, entryPath) {
+ continue
+ }
+ dirCount++
+
+ go func(entryPath string) {
+ concurrencyLimit <- struct{}{}
+ subdir := a.processDir(entryPath)
+ subdir.Parent = dir
+
+ subDirChan <- subdir
+ <-concurrencyLimit
+ }(entryPath)
+ } else {
+ info, err = f.Info()
+ if err != nil {
+ log.Print(err.Error())
+ dir.Flag = '!'
+ continue
+ }
+ if a.followSymlinks && info.Mode()&os.ModeSymlink != 0 {
+ infoF, err := followSymlink(entryPath, a.gitAnnexedSize)
+ if err != nil {
+ log.Print(err.Error())
+ dir.Flag = '!'
+ continue
+ }
+ if infoF != nil {
+ info = infoF
+ }
+ }
+
+ // Check if it's a zip or jar file
+ if a.archiveBrowsing && isZipFile(name) {
+ zipDir, err := processZipFile(entryPath, info)
+ if err != nil {
+ // If unable to process zip file, treat as regular file
+ log.Printf("Failed to process zip file %s: %v", entryPath, err)
+ file = &File{
+ Name: name,
+ Flag: getFlag(info),
+ Size: info.Size(),
+ Parent: dir,
+ }
+ } else {
+ // Successfully processed zip file, use zip content size
+ uncompressedSize, compressedSize, err := getZipFileSize(entryPath)
+ if err == nil {
+ zipDir.Size = uncompressedSize
+ zipDir.Usage = compressedSize
+ }
+ zipDir.Parent = dir
+ file = zipDir
+ }
+ } else {
+ file = &File{
+ Name: name,
+ Flag: getFlag(info),
+ Size: info.Size(),
+ Parent: dir,
+ }
+ }
+
+ // Apply time filter if set
+ if a.matchesTimeFilterFn != nil && !a.matchesTimeFilterFn(info.ModTime()) {
+ continue // Skip this file
+ }
+
+ // Apply file type filter if set
+ if a.ignoreFileType != nil && a.ignoreFileType(name) {
+ continue // Skip this file
+ }
+
+ if file != nil {
+ // Only set platform-specific attributes for regular files
+ if regularFile, ok := file.(*File); ok {
+ setPlatformSpecificAttrs(regularFile, info)
+ }
+ totalSize += file.GetUsage()
+ dir.AddFile(file)
+ }
+ }
+ }
+
+ go func() {
+ var sub *Dir
+
+ for i := 0; i < dirCount; i++ {
+ sub = <-subDirChan
+ dir.AddFile(sub)
+ }
+
+ a.wait.Done()
+ }()
+
+ a.progressChan <- common.CurrentProgress{
+ CurrentItemName: path,
+ ItemCount: int64(len(files)),
+ TotalSize: totalSize,
+ }
+ return dir
+}
+
+func (a *ParallelAnalyzer) updateProgress() {
+ for {
+ select {
+ case <-a.progressDoneChan:
+ return
+ case progress := <-a.progressChan:
+ a.progress.CurrentItemName = progress.CurrentItemName
+ a.progress.ItemCount += progress.ItemCount
+ a.progress.TotalSize += progress.TotalSize
+ }
+
+ select {
+ case a.progressOutChan <- *a.progress:
+ default:
+ }
+ }
+}
+
+func getDirFlag(err error, items int) rune {
+ switch {
+ case err != nil:
+ return '!'
+ case items == 0:
+ return 'e'
+ default:
+ return ' '
+ }
+}
+
+func getFlag(f os.FileInfo) rune {
+ if f.Mode()&os.ModeSymlink != 0 || f.Mode()&os.ModeSocket != 0 {
+ return '@'
+ }
+ return ' '
+}
--- /dev/null
+package analyze
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestParallelAnalyzerSetFollowSymlinks(t *testing.T) {
+ analyzer := CreateAnalyzer()
+ analyzer.SetFollowSymlinks(true)
+ assert.True(t, analyzer.followSymlinks)
+ analyzer.SetFollowSymlinks(false)
+ assert.False(t, analyzer.followSymlinks)
+}
+
+func TestParallelAnalyzerSetShowAnnexedSize(t *testing.T) {
+ analyzer := CreateAnalyzer()
+ analyzer.SetShowAnnexedSize(true)
+ assert.True(t, analyzer.gitAnnexedSize)
+ analyzer.SetShowAnnexedSize(false)
+ assert.False(t, analyzer.gitAnnexedSize)
+}
+
+func TestGetDirFlagWithError(t *testing.T) {
+ flag := getDirFlag(os.ErrNotExist, 5)
+ assert.Equal(t, '!', flag)
+}
+
+func TestGetDirFlagWithEmptyDir(t *testing.T) {
+ flag := getDirFlag(nil, 0)
+ assert.Equal(t, 'e', flag)
+}
+
+func TestGetDirFlagWithNormalDir(t *testing.T) {
+ flag := getDirFlag(nil, 5)
+ assert.Equal(t, ' ', flag)
+}
+
+func TestGetFlagWithSymlink(t *testing.T) {
+ // Create a temporary symlink
+ symlinkPath := "/tmp/test_symlink"
+ defer os.Remove(symlinkPath)
+
+ err := os.Symlink("/tmp", symlinkPath)
+ assert.NoError(t, err)
+
+ info, err := os.Lstat(symlinkPath)
+ assert.NoError(t, err)
+
+ flag := getFlag(info)
+ assert.Equal(t, '@', flag)
+}
+
+func TestGetFlagWithRegularFile(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ info, err := os.Stat("test_dir/nested/file2")
+ assert.NoError(t, err)
+
+ flag := getFlag(info)
+ assert.Equal(t, ' ', flag)
+}
+
+func TestParallelAnalyzerUpdateProgress(t *testing.T) {
+ analyzer := CreateAnalyzer()
+
+ // Start the progress updater
+ go analyzer.updateProgress()
+
+ // Send some progress updates
+ analyzer.progressChan <- struct {
+ CurrentItemName string
+ ItemCount int64
+ TotalSize int64
+ }{
+ CurrentItemName: "test",
+ ItemCount: 5,
+ TotalSize: 100,
+ }
+
+ // Wait a bit for the progress to be processed
+ time.Sleep(10 * time.Millisecond)
+
+ // Send done signal
+ analyzer.progressDoneChan <- struct{}{}
+
+ // Wait for the updater to finish
+ time.Sleep(10 * time.Millisecond)
+}
+
+func TestParallelAnalyzerUpdateProgressWithDefaultCase(t *testing.T) {
+ analyzer := CreateAnalyzer()
+
+ // Start the progress updater
+ go analyzer.updateProgress()
+
+ // Send some progress updates
+ analyzer.progressChan <- struct {
+ CurrentItemName string
+ ItemCount int64
+ TotalSize int64
+ }{
+ CurrentItemName: "test",
+ ItemCount: 5,
+ TotalSize: 100,
+ }
+
+ // Wait a bit for the progress to be processed
+ time.Sleep(10 * time.Millisecond)
+
+ // Send another progress update to trigger the default case
+ analyzer.progressChan <- struct {
+ CurrentItemName string
+ ItemCount int64
+ TotalSize int64
+ }{
+ CurrentItemName: "test2",
+ ItemCount: 3,
+ TotalSize: 50,
+ }
+
+ // Wait a bit for the progress to be processed
+ time.Sleep(10 * time.Millisecond)
+
+ // Send done signal
+ analyzer.progressDoneChan <- struct{}{}
+
+ // Wait for the updater to finish
+ time.Sleep(10 * time.Millisecond)
+}
+
+func TestParallelAnalyzerAnalyzeDirWithIgnoreDir(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ analyzer := CreateAnalyzer()
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(name, _ string) bool { return name == "nested" }, func(_ string) bool { return false },
+ ).(*Dir)
+
+ analyzer.GetDone().Wait()
+
+ assert.NotNil(t, dir)
+ assert.Equal(t, "test_dir", dir.Name)
+ // Should have fewer items since nested directory was ignored
+ assert.Less(t, dir.ItemCount, int64(5))
+}
--- /dev/null
+package analyze
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/dundee/gdu/v5/internal/common"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ log "github.com/sirupsen/logrus"
+)
+
+// ParallelStableOrderAnalyzer implements Analyzer
+type ParallelStableOrderAnalyzer struct {
+ progress *common.CurrentProgress
+ progressChan chan common.CurrentProgress
+ progressOutChan chan common.CurrentProgress
+ progressDoneChan chan struct{}
+ doneChan common.SignalGroup
+ wait *WaitGroup
+ ignoreDir common.ShouldDirBeIgnored
+ ignoreFileType common.ShouldFileBeIgnored
+ followSymlinks bool
+ gitAnnexedSize bool
+}
+
+// CreateStableOrderAnalyzer returns parallel Analyzer which keeps stable order of files
+func CreateStableOrderAnalyzer() *ParallelStableOrderAnalyzer {
+ return &ParallelStableOrderAnalyzer{
+ progress: &common.CurrentProgress{
+ ItemCount: 0,
+ TotalSize: int64(0),
+ },
+ progressChan: make(chan common.CurrentProgress, 1),
+ progressOutChan: make(chan common.CurrentProgress, 1),
+ progressDoneChan: make(chan struct{}),
+ doneChan: make(common.SignalGroup),
+ wait: (&WaitGroup{}).Init(),
+ }
+}
+
+// SetFollowSymlinks sets whether symlink to files should be followed
+func (a *ParallelStableOrderAnalyzer) SetFollowSymlinks(v bool) {
+ a.followSymlinks = v
+}
+
+// SetShowAnnexedSize sets whether to use annexed size of git-annex files
+func (a *ParallelStableOrderAnalyzer) SetShowAnnexedSize(v bool) {
+ a.gitAnnexedSize = v
+}
+
+// SetFileTypeFilter sets the file type filter function
+func (a *ParallelStableOrderAnalyzer) SetFileTypeFilter(filter common.ShouldFileBeIgnored) {
+ a.ignoreFileType = filter
+}
+
+// GetProgressChan returns channel for getting progress
+func (a *ParallelStableOrderAnalyzer) GetProgressChan() chan common.CurrentProgress {
+ return a.progressOutChan
+}
+
+// GetDone returns channel for checking when analysis is done
+func (a *ParallelStableOrderAnalyzer) GetDone() common.SignalGroup {
+ return a.doneChan
+}
+
+// ResetProgress returns progress
+func (a *ParallelStableOrderAnalyzer) ResetProgress() {
+ a.progress = &common.CurrentProgress{}
+ a.progressChan = make(chan common.CurrentProgress, 1)
+ a.progressOutChan = make(chan common.CurrentProgress, 1)
+ a.progressDoneChan = make(chan struct{})
+ a.doneChan = make(common.SignalGroup)
+ a.wait = (&WaitGroup{}).Init()
+}
+
+// AnalyzeDir analyzes given path
+func (a *ParallelStableOrderAnalyzer) AnalyzeDir(
+ path string, ignore common.ShouldDirBeIgnored, fileTypeFilter common.ShouldFileBeIgnored,
+) fs.Item {
+ a.ignoreDir = ignore
+ a.ignoreFileType = fileTypeFilter
+
+ go a.updateProgress()
+ dir := a.processDir(path)
+
+ dir.BasePath = filepath.Dir(path)
+ a.wait.Wait()
+
+ a.progressDoneChan <- struct{}{}
+ a.doneChan.Broadcast()
+
+ return dir
+}
+
+func (a *ParallelStableOrderAnalyzer) processDir(path string) *Dir {
+ type indexedItem struct {
+ index int
+ item fs.Item
+ }
+
+ var (
+ file *File
+ err error
+ totalSize int64
+ info os.FileInfo
+ itemCount int
+ dirCount int
+ )
+
+ a.wait.Add(1)
+
+ files, err := os.ReadDir(path)
+ if err != nil {
+ log.Print(err.Error())
+ }
+
+ dir := &Dir{
+ File: &File{
+ Name: filepath.Base(path),
+ Flag: getDirFlag(err, len(files)),
+ },
+ ItemCount: 1,
+ Files: make(fs.Files, 0, len(files)),
+ }
+ setDirPlatformSpecificAttrs(dir, path)
+
+ // Buffer channel to prevent deadlock when sending files synchronously
+ itemChan := make(chan indexedItem, len(files))
+
+ for _, f := range files {
+ name := f.Name()
+ entryPath := filepath.Join(path, name)
+ if f.IsDir() {
+ if a.ignoreDir(name, entryPath) {
+ continue
+ }
+ currentIndex := itemCount
+ itemCount++
+ dirCount++
+
+ go func(entryPath string, idx int) {
+ concurrencyLimit <- struct{}{}
+ subdir := a.processDir(entryPath)
+ subdir.Parent = dir
+
+ itemChan <- indexedItem{idx, subdir}
+ <-concurrencyLimit
+ }(entryPath, currentIndex)
+ } else {
+ info, err = f.Info()
+ if err != nil {
+ log.Print(err.Error())
+ dir.Flag = '!'
+ continue
+ }
+ if a.followSymlinks && info.Mode()&os.ModeSymlink != 0 {
+ infoF, err := followSymlink(entryPath, a.gitAnnexedSize)
+ if err != nil {
+ log.Print(err.Error())
+ dir.Flag = '!'
+ continue
+ }
+ if infoF != nil {
+ info = infoF
+ }
+ }
+
+ // Apply file type filter if set
+ if a.ignoreFileType != nil && a.ignoreFileType(name) {
+ continue // Skip this file
+ }
+
+ file = &File{
+ Name: name,
+ Flag: getFlag(info),
+ Size: info.Size(),
+ Parent: dir,
+ }
+ setPlatformSpecificAttrs(file, info)
+
+ totalSize += file.Usage
+
+ // Send file to channel with its index
+ itemChan <- indexedItem{itemCount, file}
+ itemCount++
+ }
+ }
+
+ go func() {
+ items := make([]indexedItem, itemCount)
+
+ // Collect all items (both files and subdirs)
+ for i := 0; i < itemCount; i++ {
+ indexed := <-itemChan
+ items[indexed.index] = indexed
+ }
+
+ // Add all items in their original order
+ for i := 0; i < itemCount; i++ {
+ dir.AddFile(items[i].item)
+ }
+
+ a.wait.Done()
+ }()
+
+ a.progressChan <- common.CurrentProgress{
+ CurrentItemName: path,
+ ItemCount: int64(len(files)),
+ TotalSize: totalSize,
+ }
+ return dir
+}
+
+func (a *ParallelStableOrderAnalyzer) updateProgress() {
+ for {
+ select {
+ case <-a.progressDoneChan:
+ return
+ case progress := <-a.progressChan:
+ a.progress.CurrentItemName = progress.CurrentItemName
+ a.progress.ItemCount += progress.ItemCount
+ a.progress.TotalSize += progress.TotalSize
+ }
+
+ select {
+ case a.progressOutChan <- *a.progress:
+ default:
+ }
+ }
+}
--- /dev/null
+package analyze
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/dundee/gdu/v5/internal/common"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ log "github.com/sirupsen/logrus"
+)
+
+// SequentialAnalyzer implements Analyzer
+type SequentialAnalyzer struct {
+ progress *common.CurrentProgress
+ progressChan chan common.CurrentProgress
+ progressOutChan chan common.CurrentProgress
+ progressDoneChan chan struct{}
+ doneChan common.SignalGroup
+ wait *WaitGroup
+ ignoreDir common.ShouldDirBeIgnored
+ ignoreFileType common.ShouldFileBeIgnored
+ followSymlinks bool
+ gitAnnexedSize bool
+ matchesTimeFilterFn common.TimeFilter
+ archiveBrowsing bool
+}
+
+// CreateSeqAnalyzer returns Analyzer
+func CreateSeqAnalyzer() *SequentialAnalyzer {
+ return &SequentialAnalyzer{
+ progress: &common.CurrentProgress{
+ ItemCount: 0,
+ TotalSize: int64(0),
+ },
+ progressChan: make(chan common.CurrentProgress, 1),
+ progressOutChan: make(chan common.CurrentProgress, 1),
+ progressDoneChan: make(chan struct{}),
+ doneChan: make(common.SignalGroup),
+ wait: (&WaitGroup{}).Init(),
+ }
+}
+
+// SetFollowSymlinks sets whether symlink to files should be followed
+func (a *SequentialAnalyzer) SetFollowSymlinks(v bool) {
+ a.followSymlinks = v
+}
+
+// SetShowAnnexedSize sets whether to use annexed size of git-annex files
+func (a *SequentialAnalyzer) SetShowAnnexedSize(v bool) {
+ a.gitAnnexedSize = v
+}
+
+// SetTimeFilter sets the time filter function for file inclusion
+func (a *SequentialAnalyzer) SetTimeFilter(matchesTimeFilterFn common.TimeFilter) {
+ a.matchesTimeFilterFn = matchesTimeFilterFn
+}
+
+// SetArchiveBrowsing sets whether browsing of zip/jar archives is enabled
+func (a *SequentialAnalyzer) SetArchiveBrowsing(v bool) {
+ a.archiveBrowsing = v
+}
+
+// SetFileTypeFilter sets the file type filter function
+func (a *SequentialAnalyzer) SetFileTypeFilter(filter common.ShouldFileBeIgnored) {
+ a.ignoreFileType = filter
+}
+
+// GetProgressChan returns channel for getting progress
+func (a *SequentialAnalyzer) GetProgressChan() chan common.CurrentProgress {
+ return a.progressOutChan
+}
+
+// GetDone returns channel for checking when analysis is done
+func (a *SequentialAnalyzer) GetDone() common.SignalGroup {
+ return a.doneChan
+}
+
+// ResetProgress returns progress
+func (a *SequentialAnalyzer) ResetProgress() {
+ a.progress = &common.CurrentProgress{}
+ a.progressChan = make(chan common.CurrentProgress, 1)
+ a.progressOutChan = make(chan common.CurrentProgress, 1)
+ a.progressDoneChan = make(chan struct{})
+ a.doneChan = make(common.SignalGroup)
+}
+
+// AnalyzeDir analyzes given path
+func (a *SequentialAnalyzer) AnalyzeDir(
+ path string, ignore common.ShouldDirBeIgnored, fileTypeFilter common.ShouldFileBeIgnored,
+) fs.Item {
+ a.ignoreDir = ignore
+ a.ignoreFileType = fileTypeFilter
+
+ go a.updateProgress()
+ dir := a.processDir(path)
+
+ dir.BasePath = filepath.Dir(path)
+
+ a.progressDoneChan <- struct{}{}
+ a.doneChan.Broadcast()
+
+ return dir
+}
+
+func (a *SequentialAnalyzer) processDir(path string) *Dir {
+ var (
+ file fs.Item
+ err error
+ totalSize int64
+ info os.FileInfo
+ dirCount int
+ )
+
+ files, err := os.ReadDir(path)
+ if err != nil {
+ log.Print(err.Error())
+ }
+
+ dir := &Dir{
+ File: &File{
+ Name: filepath.Base(path),
+ Flag: getDirFlag(err, len(files)),
+ },
+ ItemCount: 1,
+ Files: make(fs.Files, 0, len(files)),
+ }
+ setDirPlatformSpecificAttrs(dir, path)
+
+ for _, f := range files {
+ name := f.Name()
+ entryPath := filepath.Join(path, name)
+ if f.IsDir() {
+ if a.ignoreDir(name, entryPath) {
+ continue
+ }
+ dirCount++
+
+ subdir := a.processDir(entryPath)
+ subdir.Parent = dir
+ dir.AddFile(subdir)
+ } else {
+ info, err = f.Info()
+ if err != nil {
+ log.Print(err.Error())
+ dir.Flag = '!'
+ continue
+ }
+ if a.followSymlinks && info.Mode()&os.ModeSymlink != 0 {
+ infoF, err := followSymlink(entryPath, a.gitAnnexedSize)
+ if err != nil {
+ log.Print(err.Error())
+ dir.Flag = '!'
+ continue
+ }
+ if infoF != nil {
+ info = infoF
+ }
+ }
+
+ // Check if it's a zip or jar file
+ if a.archiveBrowsing && isZipFile(name) {
+ zipDir, err := processZipFile(entryPath, info)
+ if err != nil {
+ // If unable to process zip file, treat as regular file
+ log.Printf("Failed to process zip file %s: %v", entryPath, err)
+ file = &File{
+ Name: name,
+ Flag: getFlag(info),
+ Size: info.Size(),
+ Parent: dir,
+ }
+ } else {
+ // Successfully processed zip file, use zip content size
+ uncompressedSize, compressedSize, err := getZipFileSize(entryPath)
+ if err == nil {
+ zipDir.Size = uncompressedSize
+ zipDir.Usage = compressedSize
+ }
+ zipDir.Parent = dir
+ file = zipDir
+ }
+ } else {
+ file = &File{
+ Name: name,
+ Flag: getFlag(info),
+ Size: info.Size(),
+ Parent: dir,
+ }
+ }
+
+ // Apply time filter if set
+ if a.matchesTimeFilterFn != nil && !a.matchesTimeFilterFn(info.ModTime()) {
+ continue // Skip this file
+ }
+
+ // Apply file type filter if set
+ if a.ignoreFileType != nil && a.ignoreFileType(name) {
+ continue // Skip this file
+ }
+
+ if file != nil {
+ // Only set platform-specific attributes for regular files
+ if regularFile, ok := file.(*File); ok {
+ setPlatformSpecificAttrs(regularFile, info)
+ }
+ totalSize += file.GetUsage()
+ dir.AddFile(file)
+ }
+ }
+ }
+
+ a.progressChan <- common.CurrentProgress{
+ CurrentItemName: path,
+ ItemCount: int64(len(files)),
+ TotalSize: totalSize,
+ }
+ return dir
+}
+
+func (a *SequentialAnalyzer) updateProgress() {
+ for {
+ select {
+ case <-a.progressDoneChan:
+ return
+ case progress := <-a.progressChan:
+ a.progress.CurrentItemName = progress.CurrentItemName
+ a.progress.ItemCount += progress.ItemCount
+ a.progress.TotalSize += progress.TotalSize
+ }
+
+ select {
+ case a.progressOutChan <- *a.progress:
+ default:
+ }
+ }
+}
--- /dev/null
+package analyze
+
+import (
+ "testing"
+ "time"
+
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestSequentialAnalyzerSetFollowSymlinks(t *testing.T) {
+ analyzer := CreateSeqAnalyzer()
+ analyzer.SetFollowSymlinks(true)
+ assert.True(t, analyzer.followSymlinks)
+ analyzer.SetFollowSymlinks(false)
+ assert.False(t, analyzer.followSymlinks)
+}
+
+func TestSequentialAnalyzerSetShowAnnexedSize(t *testing.T) {
+ analyzer := CreateSeqAnalyzer()
+ analyzer.SetShowAnnexedSize(true)
+ assert.True(t, analyzer.gitAnnexedSize)
+ analyzer.SetShowAnnexedSize(false)
+ assert.False(t, analyzer.gitAnnexedSize)
+}
+
+func TestSequentialAnalyzerUpdateProgress(t *testing.T) {
+ analyzer := CreateSeqAnalyzer()
+
+ // Start the progress updater
+ go analyzer.updateProgress()
+
+ // Send some progress updates
+ analyzer.progressChan <- struct {
+ CurrentItemName string
+ ItemCount int64
+ TotalSize int64
+ }{
+ CurrentItemName: "test",
+ ItemCount: 5,
+ TotalSize: 100,
+ }
+
+ // Wait a bit for the progress to be processed
+ time.Sleep(10 * time.Millisecond)
+
+ // Send done signal
+ analyzer.progressDoneChan <- struct{}{}
+
+ // Wait for the updater to finish
+ time.Sleep(10 * time.Millisecond)
+}
+
+func TestSequentialAnalyzerUpdateProgressWithDefaultCase(t *testing.T) {
+ analyzer := CreateSeqAnalyzer()
+
+ // Start the progress updater
+ go analyzer.updateProgress()
+
+ // Send some progress updates
+ analyzer.progressChan <- struct {
+ CurrentItemName string
+ ItemCount int64
+ TotalSize int64
+ }{
+ CurrentItemName: "test",
+ ItemCount: 5,
+ TotalSize: 100,
+ }
+
+ // Wait a bit for the progress to be processed
+ time.Sleep(10 * time.Millisecond)
+
+ // Send another progress update to trigger the default case
+ analyzer.progressChan <- struct {
+ CurrentItemName string
+ ItemCount int64
+ TotalSize int64
+ }{
+ CurrentItemName: "test2",
+ ItemCount: 3,
+ TotalSize: 50,
+ }
+
+ // Wait a bit for the progress to be processed
+ time.Sleep(10 * time.Millisecond)
+
+ // Send done signal
+ analyzer.progressDoneChan <- struct{}{}
+
+ // Wait for the updater to finish
+ time.Sleep(10 * time.Millisecond)
+}
+
+func TestSequentialAnalyzerAnalyzeDirWithIgnoreDir(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ analyzer := CreateSeqAnalyzer()
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(name, _ string) bool { return name == "nested" }, func(_ string) bool { return false },
+ ).(*Dir)
+
+ analyzer.GetDone().Wait()
+
+ assert.NotNil(t, dir)
+ assert.Equal(t, "test_dir", dir.Name)
+ // Should have fewer items since nested directory was ignored
+ assert.Less(t, dir.ItemCount, int64(5))
+}
--- /dev/null
+package analyze
+
+import (
+ "os"
+ "sort"
+ "testing"
+
+ log "github.com/sirupsen/logrus"
+
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/stretchr/testify/assert"
+)
+
+func init() {
+ log.SetLevel(log.WarnLevel)
+}
+
+func TestAnalyzeDirSeq(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ analyzer := CreateSeqAnalyzer()
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*Dir)
+
+ progress := <-analyzer.GetProgressChan()
+ assert.GreaterOrEqual(t, progress.TotalSize, int64(0))
+
+ analyzer.GetDone().Wait()
+ analyzer.ResetProgress()
+ dir.UpdateStats(make(fs.HardLinkedItems))
+
+ // test dir info
+ assert.Equal(t, "test_dir", dir.Name)
+ assert.Equal(t, int64(7+4096*3), dir.Size)
+ assert.Equal(t, int64(5), dir.ItemCount)
+ assert.True(t, dir.IsDir())
+
+ // test dir tree
+ assert.Equal(t, "nested", dir.Files[0].GetName())
+ assert.Equal(t, "subnested", dir.Files[0].(*Dir).Files[1].GetName())
+
+ // test file
+ assert.Equal(t, "file2", dir.Files[0].(*Dir).Files[0].GetName())
+ assert.Equal(t, int64(2), dir.Files[0].(*Dir).Files[0].GetSize())
+
+ assert.Equal(
+ t, "file", dir.Files[0].(*Dir).Files[1].(*Dir).Files[0].GetName(),
+ )
+ assert.Equal(
+ t, int64(5), dir.Files[0].(*Dir).Files[1].(*Dir).Files[0].GetSize(),
+ )
+
+ // test parent link
+ assert.Equal(
+ t,
+ "test_dir",
+ dir.Files[0].(*Dir).
+ Files[1].(*Dir).
+ Files[0].
+ GetParent().
+ GetParent().
+ GetParent().
+ GetName(),
+ )
+}
+
+func TestIgnoreDirSeq(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ dir := CreateSeqAnalyzer().AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return true }, func(_ string) bool { return false },
+ ).(*Dir)
+
+ assert.Equal(t, "test_dir", dir.Name)
+ assert.Equal(t, int64(1), dir.ItemCount)
+}
+
+func TestFlagsSeq(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ err := os.Mkdir("test_dir/empty", 0o644)
+ assert.Nil(t, err)
+
+ err = os.Symlink("test_dir/nested/file2", "test_dir/nested/file3")
+ assert.Nil(t, err)
+
+ analyzer := CreateSeqAnalyzer()
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*Dir)
+ analyzer.GetDone().Wait()
+ dir.UpdateStats(make(fs.HardLinkedItems))
+
+ sort.Sort(sort.Reverse(dir.Files))
+
+ assert.Equal(t, int64(28+4096*4), dir.Size)
+ assert.Equal(t, int64(7), dir.ItemCount)
+
+ // test file3
+ assert.Equal(t, "nested", dir.Files[0].GetName())
+ assert.Equal(t, "file3", dir.Files[0].(*Dir).Files[1].GetName())
+ assert.Equal(t, int64(21), dir.Files[0].(*Dir).Files[1].GetSize())
+ assert.Equal(t, '@', dir.Files[0].(*Dir).Files[1].GetFlag())
+
+ assert.Equal(t, 'e', dir.Files[1].GetFlag())
+}
+
+func TestHardlinkSeq(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ err := os.Link("test_dir/nested/file2", "test_dir/nested/file3")
+ assert.Nil(t, err)
+
+ analyzer := CreateSeqAnalyzer()
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*Dir)
+ analyzer.GetDone().Wait()
+ dir.UpdateStats(make(fs.HardLinkedItems))
+
+ assert.Equal(t, int64(7+4096*3), dir.Size) // file2 and file3 are counted just once for size
+ assert.Equal(t, int64(6), dir.ItemCount) // but twice for item count
+
+ // test file3
+ assert.Equal(t, "file3", dir.Files[0].(*Dir).Files[1].GetName())
+ assert.Equal(t, int64(2), dir.Files[0].(*Dir).Files[1].GetSize())
+ assert.Equal(t, 'H', dir.Files[0].(*Dir).Files[1].GetFlag())
+}
+
+func TestFollowSymlinkSeq(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ err := os.Mkdir("test_dir/empty", 0o644)
+ assert.Nil(t, err)
+
+ err = os.Symlink("./file2", "test_dir/nested/file3")
+ assert.Nil(t, err)
+
+ analyzer := CreateSeqAnalyzer()
+ analyzer.SetFollowSymlinks(true)
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*Dir)
+ analyzer.GetDone().Wait()
+ dir.UpdateStats(make(fs.HardLinkedItems))
+
+ sort.Sort(sort.Reverse(dir.Files))
+
+ assert.Equal(t, int64(9+4096*4), dir.Size)
+ assert.Equal(t, int64(7), dir.ItemCount)
+
+ // test file3
+ assert.Equal(t, "nested", dir.Files[0].GetName())
+ assert.Equal(t, "file3", dir.Files[0].(*Dir).Files[1].GetName())
+ assert.Equal(t, int64(2), dir.Files[0].(*Dir).Files[1].GetSize())
+ assert.Equal(t, ' ', dir.Files[0].(*Dir).Files[1].GetFlag())
+
+ assert.Equal(t, 'e', dir.Files[1].GetFlag())
+}
+
+func TestBrokenSymlinkSkippedSeq(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ err := os.Mkdir("test_dir/empty", 0o644)
+ assert.Nil(t, err)
+
+ err = os.Symlink("xxx", "test_dir/nested/file3")
+ assert.Nil(t, err)
+
+ analyzer := CreateSeqAnalyzer()
+ analyzer.SetFollowSymlinks(true)
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*Dir)
+ analyzer.GetDone().Wait()
+ dir.UpdateStats(make(fs.HardLinkedItems))
+
+ sort.Sort(sort.Reverse(dir.Files))
+
+ assert.Equal(t, int64(7+4096*4), dir.Size)
+ assert.Equal(t, int64(6), dir.ItemCount)
+
+ assert.Equal(t, '!', dir.Files[0].GetFlag())
+}
+
+func BenchmarkAnalyzeDirSeq(b *testing.B) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ b.ResetTimer()
+
+ analyzer := CreateSeqAnalyzer()
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ )
+ analyzer.GetDone().Wait()
+ dir.UpdateStats(make(fs.HardLinkedItems))
+}
--- /dev/null
+package analyze
+
+import (
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestSortByUsage(t *testing.T) {
+ files := fs.Files{
+ &File{
+ Usage: 1,
+ },
+ &File{
+ Usage: 2,
+ },
+ &File{
+ Usage: 3,
+ },
+ }
+
+ sort.Sort(sort.Reverse(files))
+
+ assert.Equal(t, int64(3), files[0].GetUsage())
+ assert.Equal(t, int64(2), files[1].GetUsage())
+ assert.Equal(t, int64(1), files[2].GetUsage())
+}
+
+func TestStableSortByUsage(t *testing.T) {
+ files := fs.Files{
+ &File{
+ Name: "aaa",
+ Usage: 1,
+ },
+ &File{
+ Name: "bbb",
+ Usage: 1,
+ },
+ &File{
+ Name: "ccc",
+ Usage: 3,
+ },
+ }
+
+ sort.Sort(sort.Reverse(files))
+
+ assert.Equal(t, "ccc", files[0].GetName())
+ assert.Equal(t, "bbb", files[1].GetName())
+ assert.Equal(t, "aaa", files[2].GetName())
+}
+
+func TestSortByUsageAsc(t *testing.T) {
+ files := fs.Files{
+ &File{
+ Size: 1,
+ },
+ &File{
+ Size: 2,
+ },
+ &File{
+ Size: 3,
+ },
+ }
+
+ sort.Sort(files)
+
+ assert.Equal(t, int64(1), files[0].GetSize())
+ assert.Equal(t, int64(2), files[1].GetSize())
+ assert.Equal(t, int64(3), files[2].GetSize())
+}
+
+func TestSortBySize(t *testing.T) {
+ files := fs.Files{
+ &File{
+ Size: 1,
+ },
+ &File{
+ Size: 2,
+ },
+ &File{
+ Size: 3,
+ },
+ }
+
+ sort.Sort(sort.Reverse(fs.ByApparentSize(files)))
+
+ assert.Equal(t, int64(3), files[0].GetSize())
+ assert.Equal(t, int64(2), files[1].GetSize())
+ assert.Equal(t, int64(1), files[2].GetSize())
+}
+
+func TestSortBySizeAsc(t *testing.T) {
+ files := fs.Files{
+ &File{
+ Size: 1,
+ },
+ &File{
+ Size: 2,
+ },
+ &File{
+ Size: 3,
+ },
+ }
+
+ sort.Sort(fs.ByApparentSize(files))
+
+ assert.Equal(t, int64(1), files[0].GetSize())
+ assert.Equal(t, int64(2), files[1].GetSize())
+ assert.Equal(t, int64(3), files[2].GetSize())
+}
+
+func TestSortByItemCount(t *testing.T) {
+ files := fs.Files{
+ &Dir{
+ ItemCount: 1,
+ },
+ &Dir{
+ ItemCount: 2,
+ },
+ &Dir{
+ ItemCount: 3,
+ },
+ }
+
+ sort.Sort(sort.Reverse(fs.ByItemCount(files)))
+
+ assert.Equal(t, int64(3), files[0].GetItemCount())
+ assert.Equal(t, int64(2), files[1].GetItemCount())
+ assert.Equal(t, int64(1), files[2].GetItemCount())
+}
+
+func TestSortByName(t *testing.T) {
+ files := fs.Files{
+ &File{
+ Name: "aa",
+ },
+ &File{
+ Name: "bb",
+ },
+ &File{
+ Name: "cc",
+ },
+ }
+
+ sort.Sort(sort.Reverse(fs.ByName(files)))
+
+ assert.Equal(t, "cc", files[0].GetName())
+ assert.Equal(t, "bb", files[1].GetName())
+ assert.Equal(t, "aa", files[2].GetName())
+}
+
+func TestNaturalSortByNameAsc(t *testing.T) {
+ files := fs.Files{
+ &File{
+ Name: "aa3",
+ },
+ &File{
+ Name: "aa20",
+ },
+ &File{
+ Name: "aa100",
+ },
+ }
+
+ sort.Sort(fs.ByName(files))
+
+ assert.Equal(t, "aa3", files[0].GetName())
+ assert.Equal(t, "aa20", files[1].GetName())
+ assert.Equal(t, "aa100", files[2].GetName())
+}
+
+func TestSortByMtime(t *testing.T) {
+ files := fs.Files{
+ &File{
+ Mtime: time.Date(2021, 8, 19, 0, 40, 0, 0, time.UTC),
+ },
+ &File{
+ Mtime: time.Date(2021, 8, 19, 0, 41, 0, 0, time.UTC),
+ },
+ &File{
+ Mtime: time.Date(2021, 8, 19, 0, 42, 0, 0, time.UTC),
+ },
+ }
+
+ sort.Sort(sort.Reverse(fs.ByMtime(files)))
+
+ assert.Equal(t, 42, files[0].GetMtime().Minute())
+ assert.Equal(t, 41, files[1].GetMtime().Minute())
+ assert.Equal(t, 40, files[2].GetMtime().Minute())
+}
--- /dev/null
+package analyze
+
+import (
+ "database/sql"
+ "io"
+ "iter"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "github.com/dundee/gdu/v5/internal/common"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/pkg/errors"
+ log "github.com/sirupsen/logrus"
+)
+
+// SqliteStorage represents SQLite database storage
+type SqliteStorage struct {
+ db *sql.DB
+ dbPath string
+ m sync.RWMutex
+ tx *sql.Tx
+ insertStmt *sql.Stmt
+ updateStmt *sql.Stmt
+ hasInodeStmt *sql.Stmt
+}
+
+// NewSqliteStorage creates a new SQLite storage and initializes the schema
+func NewSqliteStorage(dbPath string) (*SqliteStorage, error) {
+ parentDir := filepath.Dir(dbPath)
+ if err := os.MkdirAll(parentDir, 0o755); err != nil {
+ return nil, errors.Wrap(err, "failed to create parent directory for SQLite database")
+ }
+
+ db, err := sql.Open("sqlite", dbPath)
+ if err != nil {
+ return nil, err
+ }
+
+ storage := &SqliteStorage{
+ db: db,
+ dbPath: dbPath,
+ }
+
+ if err := storage.createTables(); err != nil {
+ db.Close()
+ return nil, err
+ }
+
+ return storage, nil
+}
+
+// createTables creates the database schema if it doesn't exist
+func (s *SqliteStorage) createTables() error {
+ // Optimize for insertion speed
+ pragmas := `
+ PRAGMA synchronous = OFF;
+ PRAGMA journal_mode = MEMORY;
+ PRAGMA cache_size = -64000;
+ PRAGMA temp_store = MEMORY;
+ `
+ if _, err := s.db.Exec(pragmas); err != nil {
+ return err
+ }
+
+ schema := `
+ CREATE TABLE IF NOT EXISTS items (
+ id INTEGER PRIMARY KEY,
+ parent_id INTEGER REFERENCES items(id),
+ name TEXT NOT NULL,
+ is_dir INTEGER NOT NULL,
+ size INTEGER NOT NULL,
+ usage INTEGER NOT NULL,
+ mtime INTEGER NOT NULL,
+ item_count INTEGER NOT NULL DEFAULT 1,
+ mli INTEGER NOT NULL DEFAULT 0,
+ flag TEXT NOT NULL DEFAULT ' '
+ );
+
+ CREATE INDEX IF NOT EXISTS idx_items_parent_id ON items(parent_id);
+ CREATE INDEX IF NOT EXISTS idx_items_mli ON items(mli) WHERE mli != 0;
+
+ CREATE TABLE IF NOT EXISTS metadata (
+ key TEXT PRIMARY KEY,
+ value TEXT
+ );
+ `
+
+ _, err := s.db.Exec(schema)
+ return err
+}
+
+// Close closes the database connection
+func (s *SqliteStorage) Close() error {
+ s.m.Lock()
+ defer s.m.Unlock()
+ if s.db != nil {
+ return s.db.Close()
+ }
+ return nil
+}
+
+// ClearItems removes all items from the database
+func (s *SqliteStorage) ClearItems() error {
+ _, err := s.db.Exec("DELETE FROM items")
+ return err
+}
+
+// BeginBulkInsert starts a transaction and prepares statements for bulk insertion
+func (s *SqliteStorage) BeginBulkInsert() error {
+ s.m.Lock()
+ defer s.m.Unlock()
+
+ tx, err := s.db.Begin()
+ if err != nil {
+ return err
+ }
+ s.tx = tx
+
+ s.insertStmt, err = tx.Prepare(
+ `INSERT INTO items (parent_id, name, is_dir, size, usage, mtime, item_count, mli, flag)
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+ )
+ if err != nil {
+ rollbackErr := tx.Rollback()
+ if rollbackErr != nil {
+ log.Errorf("failed to rollback transaction: %v", rollbackErr)
+ }
+ return err
+ }
+
+ s.updateStmt, err = tx.Prepare(
+ `UPDATE items SET size = ?, usage = ?, item_count = ? WHERE id = ?`,
+ )
+ if err != nil {
+ s.insertStmt.Close()
+ rollbackErr := tx.Rollback()
+ if rollbackErr != nil {
+ log.Errorf("failed to rollback transaction: %v", rollbackErr)
+ }
+ return err
+ }
+
+ s.hasInodeStmt, err = tx.Prepare(
+ `SELECT 1 FROM items WHERE mli = ? LIMIT 1`,
+ )
+ if err != nil {
+ s.insertStmt.Close()
+ s.updateStmt.Close()
+ rollbackErr := tx.Rollback()
+ if rollbackErr != nil {
+ log.Errorf("failed to rollback transaction: %v", rollbackErr)
+ }
+ return err
+ }
+
+ return nil
+}
+
+// EndBulkInsert commits the transaction and closes prepared statements
+func (s *SqliteStorage) EndBulkInsert() error {
+ s.m.Lock()
+ defer s.m.Unlock()
+
+ if s.insertStmt != nil {
+ s.insertStmt.Close()
+ s.insertStmt = nil
+ }
+ if s.updateStmt != nil {
+ s.updateStmt.Close()
+ s.updateStmt = nil
+ }
+ if s.hasInodeStmt != nil {
+ s.hasInodeStmt.Close()
+ s.hasInodeStmt = nil
+ }
+ if s.tx != nil {
+ err := s.tx.Commit()
+ s.tx = nil
+ return err
+ }
+ return nil
+}
+
+// HasData returns true if the database contains analysis data
+func (s *SqliteStorage) HasData() bool {
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ var rowid int
+ err := s.db.QueryRow("SELECT MAX(rowid) FROM items").Scan(&rowid)
+ if err != nil {
+ return false
+ }
+ return rowid > 0
+}
+
+// HasInode returns true if a file with the given inode already exists in the database
+func (s *SqliteStorage) HasInode(mli uint64) bool {
+ var exists int
+ var err error
+
+ if s.hasInodeStmt != nil {
+ err = s.hasInodeStmt.QueryRow(mli).Scan(&exists)
+ } else {
+ s.m.RLock()
+ err = s.db.QueryRow(`SELECT 1 FROM items WHERE mli = ? LIMIT 1`, mli).Scan(&exists)
+ s.m.RUnlock()
+ }
+
+ return err == nil
+}
+
+// GetRootItem returns the root item (item with no parent)
+func (s *SqliteStorage) GetRootItem() (*SqliteItem, error) {
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ item := &SqliteItem{storage: s}
+ var parentID sql.NullInt64
+ var isDirInt int
+ var mtimeUnix int64
+ var flag string
+
+ err := s.db.QueryRow(
+ `SELECT id, parent_id, name, is_dir, size, usage, mtime, item_count, mli, flag
+ FROM items WHERE parent_id IS NULL LIMIT 1`,
+ ).Scan(
+ &item.id, &parentID, &item.name, &isDirInt,
+ &item.size, &item.usage, &mtimeUnix, &item.itemCount,
+ &item.mli, &flag,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ item.isDir = isDirInt == 1
+ item.mtime = time.Unix(mtimeUnix, 0)
+ if flag != "" {
+ item.flag = rune(flag[0])
+ } else {
+ item.flag = ' '
+ }
+
+ return item, nil
+}
+
+// InsertItem inserts a file/directory item into the database
+func (s *SqliteStorage) InsertItem(
+ parentID *int64, name string, isDir bool, size, usage int64, mtime time.Time, itemCount int, mli uint64, flag rune,
+) (int64, error) {
+ isDirInt := 0
+ if isDir {
+ isDirInt = 1
+ }
+
+ var result sql.Result
+ var err error
+
+ // Use prepared statement if in bulk mode, otherwise use direct exec
+ if s.insertStmt != nil {
+ result, err = s.insertStmt.Exec(parentID, name, isDirInt, size, usage, mtime.Unix(), itemCount, mli, string(flag))
+ } else {
+ s.m.Lock()
+ result, err = s.db.Exec(
+ `INSERT INTO items (parent_id, name, is_dir, size, usage, mtime, item_count, mli, flag)
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+ parentID, name, isDirInt, size, usage, mtime.Unix(), itemCount, mli, string(flag),
+ )
+ s.m.Unlock()
+ }
+ if err != nil {
+ return 0, err
+ }
+
+ return result.LastInsertId()
+}
+
+// UpdateItem updates an existing item's stats
+func (s *SqliteStorage) UpdateItem(id, size, usage, itemCount int64) error {
+ var err error
+
+ // Use prepared statement if in bulk mode, otherwise use direct exec
+ if s.updateStmt != nil {
+ _, err = s.updateStmt.Exec(size, usage, itemCount, id)
+ } else {
+ s.m.Lock()
+ _, err = s.db.Exec(
+ `UPDATE items SET size = ?, usage = ?, item_count = ? WHERE id = ?`,
+ size, usage, itemCount, id,
+ )
+ s.m.Unlock()
+ }
+ return err
+}
+
+// GetChildren returns all children of a given parent ID
+func (s *SqliteStorage) GetChildren(parentID int64) ([]*SqliteItem, error) {
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ rows, err := s.db.Query(
+ `SELECT id, parent_id, name, is_dir, size, usage, mtime, item_count, mli, flag
+ FROM items WHERE parent_id = ?`,
+ parentID,
+ )
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ var items []*SqliteItem
+ for rows.Next() {
+ item := &SqliteItem{storage: s}
+ var parentID sql.NullInt64
+ var isDirInt int
+ var mtimeUnix int64
+ var flag string
+
+ err := rows.Scan(
+ &item.id, &parentID, &item.name, &isDirInt,
+ &item.size, &item.usage, &mtimeUnix, &item.itemCount,
+ &item.mli, &flag,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ if parentID.Valid {
+ item.parentID = &parentID.Int64
+ }
+ item.isDir = isDirInt == 1
+ item.mtime = time.Unix(mtimeUnix, 0)
+ if flag != "" {
+ item.flag = rune(flag[0])
+ } else {
+ item.flag = ' '
+ }
+ items = append(items, item)
+ }
+
+ return items, rows.Err()
+}
+
+// GetItemByID returns an item by its ID
+func (s *SqliteStorage) GetItemByID(id int64) (*SqliteItem, error) {
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ item := &SqliteItem{storage: s}
+ var parentID sql.NullInt64
+ var isDirInt int
+ var mtimeUnix int64
+ var flag string
+
+ err := s.db.QueryRow(
+ `SELECT id, parent_id, name, is_dir, size, usage, mtime, item_count, mli, flag
+ FROM items WHERE id = ?`,
+ id,
+ ).Scan(
+ &item.id, &parentID, &item.name, &isDirInt,
+ &item.size, &item.usage, &mtimeUnix, &item.itemCount,
+ &item.mli, &flag,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ if parentID.Valid {
+ item.parentID = &parentID.Int64
+ }
+ item.isDir = isDirInt == 1
+ item.mtime = time.Unix(mtimeUnix, 0)
+ if flag != "" {
+ item.flag = rune(flag[0])
+ } else {
+ item.flag = ' '
+ }
+
+ return item, nil
+}
+
+// SetMetadata stores a metadata key-value pair
+func (s *SqliteStorage) SetMetadata(key, value string) error {
+ s.m.Lock()
+ defer s.m.Unlock()
+
+ _, err := s.db.Exec(
+ `INSERT OR REPLACE INTO metadata (key, value) VALUES (?, ?)`,
+ key, value,
+ )
+ return err
+}
+
+// GetMetadata retrieves a metadata value by key
+func (s *SqliteStorage) GetMetadata(key string) (string, error) {
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ var value string
+ err := s.db.QueryRow(`SELECT value FROM metadata WHERE key = ?`, key).Scan(&value)
+ return value, err
+}
+
+// SqliteItem represents a file or directory stored in SQLite
+type SqliteItem struct {
+ storage *SqliteStorage
+ id int64
+ parentID *int64
+ name string
+ isDir bool
+ size int64
+ usage int64
+ mtime time.Time
+ itemCount int64
+ mli uint64
+ flag rune
+ parent fs.Item
+ m sync.RWMutex
+}
+
+// GetPath returns the full path of the item
+func (i *SqliteItem) GetPath() string {
+ if i.parent != nil {
+ return filepath.Join(i.parent.GetPath(), i.name)
+ }
+ // For root item, get basePath from metadata
+ basePath, err := i.storage.GetMetadata("top_dir_path")
+ if err != nil {
+ return i.name
+ }
+ return filepath.Join(filepath.Dir(basePath), i.name)
+}
+
+// GetName returns the name of the item
+func (i *SqliteItem) GetName() string {
+ return i.name
+}
+
+// GetFlag returns the flag of the item
+func (i *SqliteItem) GetFlag() rune {
+ return i.flag
+}
+
+// IsDir returns true if the item is a directory
+func (i *SqliteItem) IsDir() bool {
+ return i.isDir
+}
+
+// GetSize returns the apparent size
+func (i *SqliteItem) GetSize() int64 {
+ return i.size
+}
+
+// GetType returns the type of the item
+func (i *SqliteItem) GetType() string {
+ if i.isDir {
+ return "Directory"
+ }
+ if i.flag == '@' {
+ return "Other"
+ }
+ return "File"
+}
+
+// GetUsage returns the disk usage
+func (i *SqliteItem) GetUsage() int64 {
+ return i.usage
+}
+
+// GetMtime returns the modification time
+func (i *SqliteItem) GetMtime() time.Time {
+ return i.mtime
+}
+
+// GetItemCount returns the item count
+func (i *SqliteItem) GetItemCount() int64 {
+ return i.itemCount
+}
+
+// GetParent returns the parent item
+func (i *SqliteItem) GetParent() fs.Item {
+ if i.parent != nil {
+ return i.parent
+ }
+ if i.parentID == nil {
+ return nil
+ }
+
+ parent, err := i.storage.GetItemByID(*i.parentID)
+ if err != nil {
+ log.Print(err.Error())
+ return nil
+ }
+ i.parent = parent
+ return parent
+}
+
+// SetParent sets the parent item
+func (i *SqliteItem) SetParent(parent fs.Item) {
+ i.parent = parent
+}
+
+// GetMultiLinkedInode returns the multi-linked inode number
+func (i *SqliteItem) GetMultiLinkedInode() uint64 {
+ return i.mli
+}
+
+// EncodeJSON encodes the item to JSON
+func (i *SqliteItem) EncodeJSON(writer io.Writer, topLevel bool) error {
+ // Delegate to standard encoding logic
+ // This is a simplified version - full implementation would mirror Dir.EncodeJSON
+ return nil
+}
+
+// GetItemStats returns item statistics - hard links already handled during scan
+func (i *SqliteItem) GetItemStats(linkedItems fs.HardLinkedItems) (itemCount, size, usage int64) {
+ return i.itemCount, i.size, i.usage
+}
+
+// UpdateStats is a no-op for SqliteItem - hard links are handled during scan
+func (i *SqliteItem) UpdateStats(linkedItems fs.HardLinkedItems) {
+}
+
+// AddFile adds a child file (no-op for SQLite items - children are in DB)
+func (i *SqliteItem) AddFile(item fs.Item) {
+ // Children are stored in database via parent_id relationship
+}
+
+// GetFiles returns children as a sorted iterator
+func (i *SqliteItem) GetFiles(sortBy fs.SortBy, order fs.SortOrder) iter.Seq[fs.Item] {
+ return func(yield func(fs.Item) bool) {
+ children, err := i.storage.GetChildren(i.id)
+ if err != nil {
+ log.Print(err.Error())
+ return
+ }
+
+ // Convert to fs.Files for sorting
+ files := make(fs.Files, len(children))
+ for idx, child := range children {
+ child.parent = i
+ files[idx] = child
+ }
+
+ sortFiles(files, sortBy, order)
+
+ for _, item := range files {
+ if !yield(item) {
+ return
+ }
+ }
+ }
+}
+
+// GetFilesLocked returns children with locking
+func (i *SqliteItem) GetFilesLocked(sortBy fs.SortBy, order fs.SortOrder) iter.Seq[fs.Item] {
+ return i.GetFiles(sortBy, order)
+}
+
+// RemoveFile removes a child file
+func (i *SqliteItem) RemoveFile(item fs.Item) {
+ // TODO: implement deletion from database
+}
+
+// RemoveFileByName removes a child by name
+func (i *SqliteItem) RemoveFileByName(name string) {
+ // TODO: implement deletion from database
+}
+
+// RLock returns a no-op unlock function
+func (i *SqliteItem) RLock() func() {
+ i.m.RLock()
+ return i.m.RUnlock
+}
+
+// SqliteAnalyzer implements Analyzer using SQLite storage
+type SqliteAnalyzer struct {
+ storage *SqliteStorage
+ progress *common.CurrentProgress
+ progressChan chan common.CurrentProgress
+ progressOutChan chan common.CurrentProgress
+ progressDoneChan chan struct{}
+ doneChan common.SignalGroup
+ wait *WaitGroup
+ ignoreDir common.ShouldDirBeIgnored
+ ignoreFileType common.ShouldFileBeIgnored
+ followSymlinks bool
+ gitAnnexedSize bool
+ matchesTimeFilterFn common.TimeFilter
+ archiveBrowsing bool
+}
+
+// CreateSqliteAnalyzer creates a new SQLite analyzer
+func CreateSqliteAnalyzer(dbPath string) (*SqliteAnalyzer, error) {
+ if err := checkAvailable(); err != nil {
+ return nil, err
+ }
+
+ storage, err := NewSqliteStorage(dbPath)
+ if err != nil {
+ return nil, err
+ }
+
+ return &SqliteAnalyzer{
+ storage: storage,
+ progress: &common.CurrentProgress{
+ ItemCount: 0,
+ TotalSize: int64(0),
+ },
+ progressChan: make(chan common.CurrentProgress, 1),
+ progressOutChan: make(chan common.CurrentProgress, 1),
+ progressDoneChan: make(chan struct{}),
+ doneChan: make(common.SignalGroup),
+ wait: (&WaitGroup{}).Init(),
+ }, nil
+}
+
+// SetFollowSymlinks sets whether symlinks should be followed
+func (a *SqliteAnalyzer) SetFollowSymlinks(v bool) {
+ a.followSymlinks = v
+}
+
+// SetShowAnnexedSize sets whether to use annexed size
+func (a *SqliteAnalyzer) SetShowAnnexedSize(v bool) {
+ a.gitAnnexedSize = v
+}
+
+// SetTimeFilter sets the time filter function
+func (a *SqliteAnalyzer) SetTimeFilter(matchesTimeFilterFn common.TimeFilter) {
+ a.matchesTimeFilterFn = matchesTimeFilterFn
+}
+
+// SetArchiveBrowsing sets whether archive browsing is enabled
+func (a *SqliteAnalyzer) SetArchiveBrowsing(v bool) {
+ a.archiveBrowsing = v
+}
+
+// SetFileTypeFilter sets the file type filter
+func (a *SqliteAnalyzer) SetFileTypeFilter(filter common.ShouldFileBeIgnored) {
+ a.ignoreFileType = filter
+}
+
+// GetProgressChan returns the progress channel
+func (a *SqliteAnalyzer) GetProgressChan() chan common.CurrentProgress {
+ return a.progressOutChan
+}
+
+// GetDone returns the done signal group
+func (a *SqliteAnalyzer) GetDone() common.SignalGroup {
+ return a.doneChan
+}
+
+// ResetProgress resets the progress state
+func (a *SqliteAnalyzer) ResetProgress() {
+ a.progress = &common.CurrentProgress{}
+ a.progressChan = make(chan common.CurrentProgress, 1)
+ a.progressOutChan = make(chan common.CurrentProgress, 1)
+ a.progressDoneChan = make(chan struct{})
+ a.doneChan = make(common.SignalGroup)
+ a.wait = (&WaitGroup{}).Init()
+}
+
+// AnalyzeDir analyzes the given path and stores results in SQLite.
+// If the database already contains data, it loads from the database instead of re-scanning.
+func (a *SqliteAnalyzer) AnalyzeDir(
+ path string, ignore common.ShouldDirBeIgnored, fileTypeFilter common.ShouldFileBeIgnored,
+) fs.Item {
+ // Check if database already has data
+ if a.storage.HasData() {
+ log.Printf("Loading analysis from existing SQLite database")
+ rootItem, err := a.storage.GetRootItem()
+ if err != nil {
+ log.Printf("Error loading from database, will re-scan: %v", err)
+ } else {
+ // Signal that we're done immediately
+ a.doneChan.Broadcast()
+ return rootItem
+ }
+ }
+
+ a.ignoreDir = ignore
+ a.ignoreFileType = fileTypeFilter
+
+ // Clear existing data and store metadata
+ err := a.storage.ClearItems()
+ if err != nil {
+ log.Printf("Error clearing items: %v", err)
+ }
+ err = a.storage.SetMetadata("top_dir_path", path)
+ if err != nil {
+ log.Printf("Error setting metadata: %v", err)
+ }
+
+ // Start bulk insert transaction
+ if err := a.storage.BeginBulkInsert(); err != nil {
+ log.Printf("Error starting bulk insert: %v", err)
+ }
+
+ go a.updateProgress()
+
+ // Process directory and get the root item
+ rootItem := a.processDir(path, nil)
+
+ a.wait.Wait()
+
+ // Commit bulk insert transaction
+ if err := a.storage.EndBulkInsert(); err != nil {
+ log.Printf("Error committing bulk insert: %v", err)
+ }
+
+ a.progressDoneChan <- struct{}{}
+ a.doneChan.Broadcast()
+
+ return rootItem
+}
+
+func (a *SqliteAnalyzer) processDir(path string, parentID *int64) *SqliteItem {
+ // Start with 4096 for directory's own size/usage, matching Dir.UpdateStats behavior
+ var (
+ totalSize int64 = 4096
+ totalUsage int64 = 4096
+ filesSize int64 // only files in this directory, for progress reporting
+ itemCount int64 = 1
+ )
+
+ a.wait.Add(1)
+ defer a.wait.Done()
+
+ files, err := os.ReadDir(path)
+ if err != nil {
+ log.Print(err.Error())
+ }
+
+ // Get directory info for mtime
+ dirInfo, err := os.Stat(path)
+ var dirMtime time.Time
+ if err == nil {
+ dirMtime = dirInfo.ModTime()
+ }
+
+ // Insert directory into database (size/usage will be updated later)
+ dirID, err := a.storage.InsertItem(
+ parentID,
+ filepath.Base(path),
+ true,
+ 0, // size will be updated later
+ 0, // usage will be updated later
+ dirMtime,
+ 1, // item_count will be updated later
+ 0,
+ getDirFlag(err, len(files)),
+ )
+ if err != nil {
+ log.Print(err.Error())
+ return nil
+ }
+
+ // Process children
+ for _, f := range files {
+ name := f.Name()
+ entryPath := filepath.Join(path, name)
+
+ if f.IsDir() {
+ if a.ignoreDir(name, entryPath) {
+ continue
+ }
+
+ // Process subdirectory recursively
+ subItem := a.processDir(entryPath, &dirID)
+ if subItem != nil {
+ totalSize += subItem.size
+ totalUsage += subItem.usage
+ itemCount += subItem.itemCount
+ }
+ } else {
+ info, err := f.Info()
+ if err != nil {
+ log.Print(err.Error())
+ continue
+ }
+
+ if a.followSymlinks && info.Mode()&os.ModeSymlink != 0 {
+ infoF, err := followSymlink(entryPath, a.gitAnnexedSize)
+ if err != nil {
+ log.Print(err.Error())
+ continue
+ }
+ if infoF != nil {
+ info = infoF
+ }
+ }
+
+ // Apply time filter
+ if a.matchesTimeFilterFn != nil && !a.matchesTimeFilterFn(info.ModTime()) {
+ continue
+ }
+
+ // Apply file type filter
+ if a.ignoreFileType != nil && a.ignoreFileType(name) {
+ continue
+ }
+
+ fileSize := info.Size()
+ fileUsage, fileMli := getSyscallStats(info)
+ fileFlag := getFlag(info)
+
+ // Handle hard links: if inode already seen, don't count size
+ if fileMli != 0 && a.storage.HasInode(fileMli) {
+ fileSize = 0
+ fileUsage = 0
+ fileFlag = 'H'
+ }
+
+ _, err = a.storage.InsertItem(
+ &dirID,
+ name,
+ false,
+ fileSize,
+ fileUsage,
+ info.ModTime(),
+ 1,
+ fileMli,
+ fileFlag,
+ )
+ if err != nil {
+ log.Print(err.Error())
+ continue
+ }
+
+ totalSize += fileSize
+ totalUsage += fileUsage
+ filesSize += fileUsage
+ itemCount++
+ }
+ }
+
+ // Update directory with computed stats
+ err = a.storage.UpdateItem(dirID, totalSize, totalUsage, itemCount)
+ if err != nil {
+ log.Printf("Error updating item: %v", err)
+ }
+
+ // Report progress (only files in this dir, subdirs already reported themselves)
+ a.progressChan <- common.CurrentProgress{
+ CurrentItemName: path,
+ ItemCount: int64(len(files)),
+ TotalSize: filesSize,
+ }
+
+ // Return SqliteItem for the directory
+ return &SqliteItem{
+ storage: a.storage,
+ id: dirID,
+ parentID: parentID,
+ name: filepath.Base(path),
+ isDir: true,
+ size: totalSize,
+ usage: totalUsage,
+ mtime: dirMtime,
+ itemCount: itemCount,
+ flag: getDirFlag(err, len(files)),
+ }
+}
+
+func (a *SqliteAnalyzer) updateProgress() {
+ for {
+ select {
+ case <-a.progressDoneChan:
+ return
+ case progress := <-a.progressChan:
+ a.progress.CurrentItemName = progress.CurrentItemName
+ a.progress.ItemCount += progress.ItemCount
+ a.progress.TotalSize += progress.TotalSize
+ }
+
+ select {
+ case a.progressOutChan <- *a.progress:
+ default:
+ }
+ }
+}
--- /dev/null
+//go:build (linux && !mips64 && !mipsle && !mips && !mips64le && !ppc64) || darwin || windows || (freebsd && !arm && !386) || (openbsd && !386) || (netbsd && !arm && !386 && !amd64)
+
+package analyze
+
+import (
+ // nolint:revive // Why: importing SQLite driver for side effects
+ _ "modernc.org/sqlite"
+)
+
+// checkAvailable checks if the modernc SQLite driver is available
+func checkAvailable() error {
+ return nil
+}
--- /dev/null
+//go:build (linux && (mips64 || mipsle || mips || mips64le || ppc64)) || (freebsd && (arm || 386)) || (openbsd && 386) || (netbsd && (arm || 386 || amd64))
+
+package analyze
+
+import "errors"
+
+// checkAvailable reports that the modernc SQLite driver is not available on this platform
+func checkAvailable() error {
+ return errors.New("modernc SQLite driver is not available on this platform")
+}
--- /dev/null
+//go:build (linux && !mips64 && !mipsle && !mips && !mips64le && !ppc64) || darwin || windows || (freebsd && !arm && !386) || (openbsd && !386) || (netbsd && !arm && !386 && !amd64)
+
+package analyze
+
+import (
+ "os"
+ "path/filepath"
+ "slices"
+ "testing"
+ "time"
+
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNewSqliteStorage(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ assert.NotNil(t, storage)
+ defer storage.Close()
+
+ // Test that the database is created
+ _, err = os.Stat(dbPath)
+ assert.NoError(t, err)
+}
+
+func TestNewSqliteStorageNestedDir(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "nested", "dir", "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ assert.NotNil(t, storage)
+ defer storage.Close()
+
+ // Test that the database is created
+ _, err = os.Stat(dbPath)
+ assert.NoError(t, err)
+}
+
+func TestSqliteStorageClose(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+
+ err = storage.Close()
+ assert.NoError(t, err)
+
+ // Closing again should not error
+ err = storage.Close()
+ assert.NoError(t, err)
+}
+
+func TestSqliteStorageHasData(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ // Initially no data
+ assert.False(t, storage.HasData())
+
+ // Insert an item
+ _, err = storage.InsertItem(nil, "root", true, 100, 100, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+
+ // Now has data
+ assert.True(t, storage.HasData())
+}
+
+func TestSqliteStorageClearItems(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ // Insert an item
+ _, err = storage.InsertItem(nil, "root", true, 100, 100, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+ assert.True(t, storage.HasData())
+
+ // Clear items
+ err = storage.ClearItems()
+ assert.NoError(t, err)
+ assert.False(t, storage.HasData())
+}
+
+func TestSqliteStorageMetadata(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ // Set metadata
+ err = storage.SetMetadata("key1", "value1")
+ assert.NoError(t, err)
+
+ // Get metadata
+ value, err := storage.GetMetadata("key1")
+ assert.NoError(t, err)
+ assert.Equal(t, "value1", value)
+
+ // Update metadata
+ err = storage.SetMetadata("key1", "value2")
+ assert.NoError(t, err)
+
+ value, err = storage.GetMetadata("key1")
+ assert.NoError(t, err)
+ assert.Equal(t, "value2", value)
+
+ // Get non-existent metadata
+ _, err = storage.GetMetadata("nonexistent")
+ assert.Error(t, err)
+}
+
+func TestSqliteStorageInsertAndGetItem(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ mtime := time.Now().Truncate(time.Second)
+
+ // Insert root directory
+ rootID, err := storage.InsertItem(nil, "root", true, 1000, 2000, mtime, 5, 0, ' ')
+ assert.NoError(t, err)
+ assert.Greater(t, rootID, int64(0))
+
+ // Get root item
+ root, err := storage.GetRootItem()
+ assert.NoError(t, err)
+ assert.Equal(t, "root", root.GetName())
+ assert.True(t, root.IsDir())
+ assert.Equal(t, int64(1000), root.GetSize())
+ assert.Equal(t, int64(2000), root.GetUsage())
+ assert.Equal(t, int64(5), root.GetItemCount())
+ assert.Equal(t, ' ', root.GetFlag())
+ assert.Equal(t, mtime, root.GetMtime())
+}
+
+func TestSqliteStorageInsertAndGetChildren(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ mtime := time.Now().Truncate(time.Second)
+
+ // Insert root
+ rootID, err := storage.InsertItem(nil, "root", true, 0, 0, mtime, 1, 0, ' ')
+ assert.NoError(t, err)
+
+ // Insert children
+ _, err = storage.InsertItem(&rootID, "file1.txt", false, 100, 4096, mtime, 1, 0, ' ')
+ assert.NoError(t, err)
+ _, err = storage.InsertItem(&rootID, "file2.txt", false, 200, 4096, mtime, 1, 12345, 'H')
+ assert.NoError(t, err)
+ _, err = storage.InsertItem(&rootID, "subdir", true, 500, 8192, mtime, 3, 0, ' ')
+ assert.NoError(t, err)
+
+ // Get children
+ children, err := storage.GetChildren(rootID)
+ assert.NoError(t, err)
+ assert.Len(t, children, 3)
+
+ // Verify children names
+ names := make([]string, len(children))
+ for i, child := range children {
+ names[i] = child.GetName()
+ }
+ assert.Contains(t, names, "file1.txt")
+ assert.Contains(t, names, "file2.txt")
+ assert.Contains(t, names, "subdir")
+}
+
+func TestSqliteStorageUpdateItem(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ // Insert item
+ id, err := storage.InsertItem(nil, "dir", true, 100, 200, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+
+ // Update item
+ err = storage.UpdateItem(id, 500, 1000, 10)
+ assert.NoError(t, err)
+
+ // Verify update
+ item, err := storage.GetItemByID(id)
+ assert.NoError(t, err)
+ assert.Equal(t, int64(500), item.GetSize())
+ assert.Equal(t, int64(1000), item.GetUsage())
+ assert.Equal(t, int64(10), item.GetItemCount())
+}
+
+func TestSqliteStorageBulkInsert(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ // Begin bulk insert
+ err = storage.BeginBulkInsert()
+ assert.NoError(t, err)
+
+ // Insert many items
+ rootID, err := storage.InsertItem(nil, "root", true, 0, 0, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+
+ for i := 0; i < 100; i++ {
+ _, err = storage.InsertItem(&rootID, "file", false, 100, 4096, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+ }
+
+ // Update during bulk mode
+ err = storage.UpdateItem(rootID, 10000, 20000, 101)
+ assert.NoError(t, err)
+
+ // End bulk insert
+ err = storage.EndBulkInsert()
+ assert.NoError(t, err)
+
+ // Verify
+ children, err := storage.GetChildren(rootID)
+ assert.NoError(t, err)
+ assert.Len(t, children, 100)
+}
+
+func TestSqliteStorageHasInode(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ // No inode initially
+ assert.False(t, storage.HasInode(12345))
+
+ // Insert item with inode
+ _, err = storage.InsertItem(nil, "file", false, 100, 4096, time.Now(), 1, 12345, 'H')
+ assert.NoError(t, err)
+
+ // Now inode exists
+ assert.True(t, storage.HasInode(12345))
+ assert.False(t, storage.HasInode(99999))
+}
+
+func TestSqliteStorageHasInodeBulkMode(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ err = storage.BeginBulkInsert()
+ assert.NoError(t, err)
+
+ // Insert item with inode in bulk mode
+ _, err = storage.InsertItem(nil, "file", false, 100, 4096, time.Now(), 1, 12345, 'H')
+ assert.NoError(t, err)
+
+ // Check inode during bulk mode (uses prepared statement)
+ assert.True(t, storage.HasInode(12345))
+ assert.False(t, storage.HasInode(99999))
+
+ err = storage.EndBulkInsert()
+ assert.NoError(t, err)
+}
+
+func TestSqliteItemGetPath(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ // Set up metadata for path resolution
+ err = storage.SetMetadata("top_dir_path", "/home/user/testdir")
+ assert.NoError(t, err)
+
+ // Insert root
+ rootID, err := storage.InsertItem(nil, "testdir", true, 0, 0, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+
+ // Insert child
+ childID, err := storage.InsertItem(&rootID, "file.txt", false, 100, 4096, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+
+ // Get root item
+ root, err := storage.GetItemByID(rootID)
+ assert.NoError(t, err)
+ assert.Equal(t, "/home/user/testdir", root.GetPath())
+
+ // Get child and set parent
+ child, err := storage.GetItemByID(childID)
+ assert.NoError(t, err)
+ child.SetParent(root)
+ assert.Equal(t, "/home/user/testdir/file.txt", child.GetPath())
+}
+
+func TestSqliteItemGetType(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ // Directory
+ dirID, err := storage.InsertItem(nil, "dir", true, 0, 0, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+ dir, _ := storage.GetItemByID(dirID)
+ assert.Equal(t, "Directory", dir.GetType())
+
+ // File
+ fileID, err := storage.InsertItem(nil, "file", false, 100, 4096, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+ file, _ := storage.GetItemByID(fileID)
+ assert.Equal(t, "File", file.GetType())
+
+ // Other (symlink flag)
+ otherID, err := storage.InsertItem(nil, "symlink", false, 100, 4096, time.Now(), 1, 0, '@')
+ assert.NoError(t, err)
+ other, _ := storage.GetItemByID(otherID)
+ assert.Equal(t, "Other", other.GetType())
+}
+
+func TestSqliteItemGetParent(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ // Insert root and child
+ rootID, err := storage.InsertItem(nil, "root", true, 0, 0, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+ childID, err := storage.InsertItem(&rootID, "child", false, 100, 4096, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+
+ // Get child
+ child, err := storage.GetItemByID(childID)
+ assert.NoError(t, err)
+
+ // Get parent (lazy loaded)
+ parent := child.GetParent()
+ assert.NotNil(t, parent)
+ assert.Equal(t, "root", parent.GetName())
+
+ // Second call should use cached parent
+ parent2 := child.GetParent()
+ assert.Equal(t, parent, parent2)
+
+ // Root item has no parent
+ root, err := storage.GetItemByID(rootID)
+ assert.NoError(t, err)
+ assert.Nil(t, root.GetParent())
+}
+
+func TestSqliteItemGetMultiLinkedInode(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ // Insert item with inode
+ id, err := storage.InsertItem(nil, "file", false, 100, 4096, time.Now(), 1, 12345, 'H')
+ assert.NoError(t, err)
+
+ item, err := storage.GetItemByID(id)
+ assert.NoError(t, err)
+ assert.Equal(t, uint64(12345), item.GetMultiLinkedInode())
+}
+
+func TestSqliteItemGetFiles(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ // Insert root and children with different usages (SortBySize sorts by usage)
+ rootID, err := storage.InsertItem(nil, "root", true, 0, 0, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+ _, err = storage.InsertItem(&rootID, "small.txt", false, 100, 1000, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+ _, err = storage.InsertItem(&rootID, "large.txt", false, 1000, 9000, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+ _, err = storage.InsertItem(&rootID, "medium.txt", false, 500, 5000, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+
+ root, err := storage.GetItemByID(rootID)
+ assert.NoError(t, err)
+
+ // Sort by name ascending (alphabetical)
+ files := slices.Collect(root.GetFiles(fs.SortByName, fs.SortAsc))
+ assert.Len(t, files, 3)
+ assert.Equal(t, "large.txt", files[0].GetName())
+ assert.Equal(t, "medium.txt", files[1].GetName())
+ assert.Equal(t, "small.txt", files[2].GetName())
+
+ // Sort by size descending (largest usage first)
+ files = slices.Collect(root.GetFiles(fs.SortBySize, fs.SortDesc))
+ assert.Len(t, files, 3)
+ assert.Equal(t, "large.txt", files[0].GetName())
+ assert.Equal(t, "medium.txt", files[1].GetName())
+ assert.Equal(t, "small.txt", files[2].GetName())
+}
+
+func TestSqliteItemGetFilesLocked(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ rootID, err := storage.InsertItem(nil, "root", true, 0, 0, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+ _, err = storage.InsertItem(&rootID, "file.txt", false, 100, 4096, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+
+ root, err := storage.GetItemByID(rootID)
+ assert.NoError(t, err)
+
+ // GetFilesLocked should work same as GetFiles
+ files := slices.Collect(root.GetFilesLocked(fs.SortByName, fs.SortAsc))
+ assert.Len(t, files, 1)
+ assert.Equal(t, "file.txt", files[0].GetName())
+}
+
+func TestSqliteItemRLock(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ id, err := storage.InsertItem(nil, "root", true, 0, 0, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+
+ item, err := storage.GetItemByID(id)
+ assert.NoError(t, err)
+
+ // RLock should return unlock function
+ unlock := item.RLock()
+ assert.NotNil(t, unlock)
+ unlock()
+}
+
+func TestSqliteItemGetItemStats(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ id, err := storage.InsertItem(nil, "dir", true, 1000, 2000, time.Now(), 5, 0, ' ')
+ assert.NoError(t, err)
+
+ item, err := storage.GetItemByID(id)
+ assert.NoError(t, err)
+
+ count, size, usage := item.GetItemStats(make(fs.HardLinkedItems))
+ assert.Equal(t, int64(5), count)
+ assert.Equal(t, int64(1000), size)
+ assert.Equal(t, int64(2000), usage)
+}
+
+func TestSqliteItemUpdateStats(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ id, err := storage.InsertItem(nil, "dir", true, 1000, 2000, time.Now(), 5, 0, ' ')
+ assert.NoError(t, err)
+
+ item, err := storage.GetItemByID(id)
+ assert.NoError(t, err)
+
+ // UpdateStats is a no-op for SqliteItem
+ item.UpdateStats(make(fs.HardLinkedItems))
+ // Just verify it doesn't panic
+}
+
+func TestSqliteItemAddFile(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ id, err := storage.InsertItem(nil, "dir", true, 0, 0, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+
+ item, err := storage.GetItemByID(id)
+ assert.NoError(t, err)
+
+ // AddFile is a no-op for SqliteItem
+ item.AddFile(nil)
+ // Just verify it doesn't panic
+}
+
+func TestSqliteItemRemoveFile(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ id, err := storage.InsertItem(nil, "dir", true, 0, 0, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+
+ item, err := storage.GetItemByID(id)
+ assert.NoError(t, err)
+
+ // RemoveFile is a no-op for SqliteItem
+ item.RemoveFile(nil)
+ // Just verify it doesn't panic
+}
+
+func TestSqliteItemRemoveFileByName(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ id, err := storage.InsertItem(nil, "dir", true, 0, 0, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+
+ item, err := storage.GetItemByID(id)
+ assert.NoError(t, err)
+
+ // RemoveFileByName is a no-op for SqliteItem
+ item.RemoveFileByName("test")
+ // Just verify it doesn't panic
+}
+
+func TestSqliteItemEncodeJSON(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ storage, err := NewSqliteStorage(dbPath)
+ assert.NoError(t, err)
+ defer storage.Close()
+
+ id, err := storage.InsertItem(nil, "dir", true, 0, 0, time.Now(), 1, 0, ' ')
+ assert.NoError(t, err)
+
+ item, err := storage.GetItemByID(id)
+ assert.NoError(t, err)
+
+ // EncodeJSON returns nil (simplified implementation)
+ err = item.EncodeJSON(nil, false)
+ assert.NoError(t, err)
+}
+
+func TestCreateSqliteAnalyzer(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ analyzer, err := CreateSqliteAnalyzer(dbPath)
+ assert.NoError(t, err)
+ assert.NotNil(t, analyzer)
+ defer analyzer.storage.Close()
+}
+
+func TestSqliteAnalyzerSetFollowSymlinks(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ analyzer, err := CreateSqliteAnalyzer(dbPath)
+ assert.NoError(t, err)
+ defer analyzer.storage.Close()
+
+ analyzer.SetFollowSymlinks(true)
+ assert.True(t, analyzer.followSymlinks)
+ analyzer.SetFollowSymlinks(false)
+ assert.False(t, analyzer.followSymlinks)
+}
+
+func TestSqliteAnalyzerSetShowAnnexedSize(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ analyzer, err := CreateSqliteAnalyzer(dbPath)
+ assert.NoError(t, err)
+ defer analyzer.storage.Close()
+
+ analyzer.SetShowAnnexedSize(true)
+ assert.True(t, analyzer.gitAnnexedSize)
+ analyzer.SetShowAnnexedSize(false)
+ assert.False(t, analyzer.gitAnnexedSize)
+}
+
+func TestSqliteAnalyzerSetArchiveBrowsing(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ analyzer, err := CreateSqliteAnalyzer(dbPath)
+ assert.NoError(t, err)
+ defer analyzer.storage.Close()
+
+ analyzer.SetArchiveBrowsing(true)
+ assert.True(t, analyzer.archiveBrowsing)
+ analyzer.SetArchiveBrowsing(false)
+ assert.False(t, analyzer.archiveBrowsing)
+}
+
+func TestSqliteAnalyzerSetTimeFilter(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ analyzer, err := CreateSqliteAnalyzer(dbPath)
+ assert.NoError(t, err)
+ defer analyzer.storage.Close()
+
+ filter := func(mtime time.Time) bool { return true }
+ analyzer.SetTimeFilter(filter)
+ assert.NotNil(t, analyzer.matchesTimeFilterFn)
+}
+
+func TestSqliteAnalyzerSetFileTypeFilter(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ analyzer, err := CreateSqliteAnalyzer(dbPath)
+ assert.NoError(t, err)
+ defer analyzer.storage.Close()
+
+ filter := func(name string) bool { return false }
+ analyzer.SetFileTypeFilter(filter)
+ assert.NotNil(t, analyzer.ignoreFileType)
+}
+
+func TestSqliteAnalyzerGetProgressChan(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ analyzer, err := CreateSqliteAnalyzer(dbPath)
+ assert.NoError(t, err)
+ defer analyzer.storage.Close()
+
+ progressChan := analyzer.GetProgressChan()
+ assert.NotNil(t, progressChan)
+}
+
+func TestSqliteAnalyzerGetDone(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ analyzer, err := CreateSqliteAnalyzer(dbPath)
+ assert.NoError(t, err)
+ defer analyzer.storage.Close()
+
+ doneChan := analyzer.GetDone()
+ assert.NotNil(t, doneChan)
+}
+
+func TestSqliteAnalyzerResetProgress(t *testing.T) {
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ analyzer, err := CreateSqliteAnalyzer(dbPath)
+ assert.NoError(t, err)
+ defer analyzer.storage.Close()
+
+ analyzer.ResetProgress()
+ assert.NotNil(t, analyzer.progress)
+ assert.NotNil(t, analyzer.progressChan)
+ assert.NotNil(t, analyzer.progressOutChan)
+ assert.NotNil(t, analyzer.doneChan)
+}
+
+func TestSqliteAnalyzerAnalyzeDir(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ analyzer, err := CreateSqliteAnalyzer(dbPath)
+ assert.NoError(t, err)
+ defer analyzer.storage.Close()
+
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*SqliteItem)
+
+ analyzer.GetDone().Wait()
+
+ // Test dir info
+ assert.Equal(t, "test_dir", dir.GetName())
+ assert.True(t, dir.IsDir())
+ assert.Equal(t, int64(5), dir.GetItemCount())
+ // Size should include directory overhead + file sizes: 4096*3 + 7 bytes
+ assert.Equal(t, int64(7+4096*3), dir.GetSize())
+
+ // Test dir tree
+ files := slices.Collect(dir.GetFiles(fs.SortByName, fs.SortAsc))
+ assert.Equal(t, 1, len(files))
+ assert.Equal(t, "nested", files[0].GetName())
+
+ nested := files[0].(*SqliteItem)
+ nestedFiles := slices.Collect(nested.GetFiles(fs.SortByName, fs.SortAsc))
+ assert.Equal(t, 2, len(nestedFiles))
+ assert.Equal(t, "file2", nestedFiles[0].GetName())
+ assert.Equal(t, "subnested", nestedFiles[1].GetName())
+
+ // Test file
+ assert.Equal(t, int64(2), nestedFiles[0].GetSize())
+
+ subnested := nestedFiles[1].(*SqliteItem)
+ subnestedFiles := slices.Collect(subnested.GetFiles(fs.SortByName, fs.SortAsc))
+ assert.Equal(t, "file", subnestedFiles[0].GetName())
+ assert.Equal(t, int64(5), subnestedFiles[0].GetSize())
+}
+
+func TestSqliteAnalyzerIgnoreDir(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ analyzer, err := CreateSqliteAnalyzer(dbPath)
+ assert.NoError(t, err)
+ defer analyzer.storage.Close()
+
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return true }, func(_ string) bool { return false },
+ ).(*SqliteItem)
+
+ analyzer.GetDone().Wait()
+
+ assert.Equal(t, "test_dir", dir.GetName())
+ assert.Equal(t, int64(1), dir.GetItemCount())
+}
+
+func TestSqliteAnalyzerIgnoreFileType(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ analyzer, err := CreateSqliteAnalyzer(dbPath)
+ assert.NoError(t, err)
+ defer analyzer.storage.Close()
+
+ // Ignore all files
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return true },
+ ).(*SqliteItem)
+
+ analyzer.GetDone().Wait()
+
+ // Only directories should remain
+ assert.Equal(t, "test_dir", dir.GetName())
+ assert.Equal(t, int64(3), dir.GetItemCount()) // test_dir, nested, subnested
+}
+
+func TestSqliteAnalyzerHardlinks(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ // Create hard link
+ err := os.Link("test_dir/nested/file2", "test_dir/nested/file3")
+ assert.NoError(t, err)
+
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ analyzer, err := CreateSqliteAnalyzer(dbPath)
+ assert.NoError(t, err)
+ defer analyzer.storage.Close()
+
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*SqliteItem)
+
+ analyzer.GetDone().Wait()
+
+ // file2 and file3 are counted just once for size but twice for item count
+ assert.Equal(t, int64(7+4096*3), dir.GetSize())
+ assert.Equal(t, int64(6), dir.GetItemCount())
+
+ // Check hard link flag
+ nested := slices.Collect(dir.GetFiles(fs.SortByName, fs.SortAsc))[0].(*SqliteItem)
+ nestedFiles := slices.Collect(nested.GetFiles(fs.SortByName, fs.SortAsc))
+
+ var file3 *SqliteItem
+ for _, f := range nestedFiles {
+ if f.GetName() == "file3" {
+ file3 = f.(*SqliteItem)
+ break
+ }
+ }
+ assert.NotNil(t, file3)
+ assert.Equal(t, 'H', file3.GetFlag())
+}
+
+func TestSqliteAnalyzerSymlink(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ // Create symlink
+ err := os.Symlink("test_dir/nested/file2", "test_dir/nested/file3")
+ assert.NoError(t, err)
+
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ analyzer, err := CreateSqliteAnalyzer(dbPath)
+ assert.NoError(t, err)
+ defer analyzer.storage.Close()
+
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*SqliteItem)
+
+ analyzer.GetDone().Wait()
+
+ // Check symlink flag
+ nested := slices.Collect(dir.GetFiles(fs.SortByName, fs.SortAsc))[0].(*SqliteItem)
+ nestedFiles := slices.Collect(nested.GetFiles(fs.SortByName, fs.SortAsc))
+
+ var file3 *SqliteItem
+ for _, f := range nestedFiles {
+ if f.GetName() == "file3" {
+ file3 = f.(*SqliteItem)
+ break
+ }
+ }
+ assert.NotNil(t, file3)
+ assert.Equal(t, '@', file3.GetFlag())
+}
+
+func TestSqliteAnalyzerFollowSymlink(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ // Create symlink to file2
+ err := os.Symlink("./file2", "test_dir/nested/file3")
+ assert.NoError(t, err)
+
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ analyzer, err := CreateSqliteAnalyzer(dbPath)
+ assert.NoError(t, err)
+ defer analyzer.storage.Close()
+
+ analyzer.SetFollowSymlinks(true)
+
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*SqliteItem)
+
+ analyzer.GetDone().Wait()
+
+ // With followSymlinks, file3 should have same size as file2
+ nested := slices.Collect(dir.GetFiles(fs.SortByName, fs.SortAsc))[0].(*SqliteItem)
+ nestedFiles := slices.Collect(nested.GetFiles(fs.SortByName, fs.SortAsc))
+
+ var file3 *SqliteItem
+ for _, f := range nestedFiles {
+ if f.GetName() == "file3" {
+ file3 = f.(*SqliteItem)
+ break
+ }
+ }
+ assert.NotNil(t, file3)
+ assert.Equal(t, int64(2), file3.GetSize())
+ assert.Equal(t, ' ', file3.GetFlag()) // Not a symlink flag when followed
+}
+
+func TestSqliteAnalyzerTimeFilter(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ analyzer, err := CreateSqliteAnalyzer(dbPath)
+ assert.NoError(t, err)
+ defer analyzer.storage.Close()
+
+ // Filter out all files (mtime filter that always returns false)
+ analyzer.SetTimeFilter(func(mtime time.Time) bool { return false })
+
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*SqliteItem)
+
+ analyzer.GetDone().Wait()
+
+ // Only directories should remain
+ assert.Equal(t, int64(3), dir.GetItemCount()) // test_dir, nested, subnested
+}
+
+func TestSqliteAnalyzerLoadFromExisting(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+
+ // First analysis
+ analyzer1, err := CreateSqliteAnalyzer(dbPath)
+ assert.NoError(t, err)
+
+ dir1 := analyzer1.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*SqliteItem)
+ analyzer1.GetDone().Wait()
+
+ assert.Equal(t, "test_dir", dir1.GetName())
+ assert.Equal(t, int64(5), dir1.GetItemCount())
+
+ analyzer1.storage.Close()
+
+ // Second analysis should load from existing data
+ analyzer2, err := CreateSqliteAnalyzer(dbPath)
+ assert.NoError(t, err)
+ defer analyzer2.storage.Close()
+
+ dir2 := analyzer2.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*SqliteItem)
+ analyzer2.GetDone().Wait()
+
+ assert.Equal(t, "test_dir", dir2.GetName())
+ assert.Equal(t, int64(5), dir2.GetItemCount())
+}
+
+func TestSqliteAnalyzerProgress(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ dbPath := filepath.Join(t.TempDir(), "test.db")
+ analyzer, err := CreateSqliteAnalyzer(dbPath)
+ assert.NoError(t, err)
+ defer analyzer.storage.Close()
+
+ // Start analysis in goroutine
+ go func() {
+ analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ )
+ }()
+
+ // Receive at least one progress update
+ select {
+ case progress := <-analyzer.GetProgressChan():
+ assert.GreaterOrEqual(t, progress.TotalSize, int64(0))
+ case <-time.After(5 * time.Second):
+ t.Fatal("Timeout waiting for progress")
+ }
+
+ analyzer.GetDone().Wait()
+}
+
+func BenchmarkSqliteAnalyzeDir(b *testing.B) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ for i := 0; i < b.N; i++ {
+ dbPath := filepath.Join(b.TempDir(), "test.db")
+ analyzer, _ := CreateSqliteAnalyzer(dbPath)
+ analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ )
+ analyzer.GetDone().Wait()
+ analyzer.storage.Close()
+ }
+}
--- /dev/null
+package analyze
+
+import (
+ "bytes"
+ "encoding/gob"
+ "path/filepath"
+ "sync"
+
+ "github.com/dgraph-io/badger/v4"
+ "github.com/dundee/gdu/v5/internal/common"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/pkg/errors"
+)
+
+func init() {
+ gob.RegisterName("analyze.StoredDir", &StoredDir{})
+ gob.RegisterName("analyze.Dir", &Dir{})
+ gob.RegisterName("analyze.File", &File{})
+ gob.RegisterName("analyze.ParentDir", &ParentDir{})
+}
+
+// DefaultStorage is a default instance of badger storage
+var DefaultStorage *Storage
+
+// Storage represents a badger storage
+type Storage struct {
+ db *badger.DB
+ storagePath string
+ topDir string
+ m sync.RWMutex
+ counter int
+ counterM sync.Mutex
+}
+
+// NewStorage returns new instance of badger storage
+func NewStorage(storagePath, topDir string) *Storage {
+ st := &Storage{
+ storagePath: storagePath,
+ topDir: topDir,
+ }
+ DefaultStorage = st
+ return st
+}
+
+// GetTopDir returns top directory
+func (s *Storage) GetTopDir() string {
+ return s.topDir
+}
+
+// IsOpen returns true if badger DB is open
+func (s *Storage) IsOpen() bool {
+ s.m.RLock()
+ defer s.m.RUnlock()
+ return s.db != nil
+}
+
+// Open opens badger DB
+func (s *Storage) Open() func() {
+ options := badger.DefaultOptions(s.storagePath)
+ options.Logger = nil
+
+ if !common.Is64Bit {
+ // For 32-bit systems, we need to set ValueLogFileSize to a smaller value to
+ // avoid "cannot allocate memory while mmapping" error
+ options.ValueLogFileSize = (1<<30 - 1) / 2
+ }
+
+ db, err := badger.Open(options)
+ if err != nil {
+ panic(err)
+ }
+ s.db = db
+
+ return func() {
+ s.db.Close()
+ s.db = nil
+ }
+}
+
+// StoreDir saves item info into badger DB
+func (s *Storage) StoreDir(dir fs.Item) error {
+ s.checkCount()
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ return s.db.Update(func(txn *badger.Txn) error {
+ b := &bytes.Buffer{}
+ enc := gob.NewEncoder(b)
+ err := enc.Encode(dir)
+ if err != nil {
+ return errors.Wrap(err, "encoding dir value")
+ }
+
+ return txn.Set([]byte(dir.GetPath()), b.Bytes())
+ })
+}
+
+// LoadDir saves item info into badger DB
+func (s *Storage) LoadDir(dir fs.Item) error {
+ s.checkCount()
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ return s.db.View(func(txn *badger.Txn) error {
+ path := dir.GetPath()
+ item, err := txn.Get([]byte(path))
+ if err != nil {
+ return errors.Wrap(err, "reading stored value for path: "+path)
+ }
+ return item.Value(func(val []byte) error {
+ b := bytes.NewBuffer(val)
+ dec := gob.NewDecoder(b)
+ return dec.Decode(dir)
+ })
+ })
+}
+
+// GetDirForPath returns Dir for given path
+func (s *Storage) GetDirForPath(path string) (item fs.Item, err error) {
+ dirPath := filepath.Dir(path)
+ name := filepath.Base(path)
+ dir := &StoredDir{
+ &Dir{
+ File: &File{
+ Name: name,
+ },
+ BasePath: dirPath,
+ },
+ nil,
+ sync.Mutex{},
+ }
+ err = s.LoadDir(dir)
+ if err != nil {
+ return nil, err
+ }
+ return dir, nil
+}
+
+func (s *Storage) checkCount() {
+ s.counterM.Lock()
+ defer s.counterM.Unlock()
+ s.counter++
+ if s.counter >= 10000 {
+ s.m.Lock()
+ defer s.m.Unlock()
+ s.counter = 0
+ s.db.Close()
+ s.Open()
+ }
+}
--- /dev/null
+package analyze
+
+import (
+ "io"
+ "iter"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "github.com/dundee/gdu/v5/internal/common"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ log "github.com/sirupsen/logrus"
+)
+
+// StoredAnalyzer implements Analyzer
+type StoredAnalyzer struct {
+ storage *Storage
+ progress *common.CurrentProgress
+ progressChan chan common.CurrentProgress
+ progressOutChan chan common.CurrentProgress
+ progressDoneChan chan struct{}
+ doneChan common.SignalGroup
+ wait *WaitGroup
+ ignoreDir common.ShouldDirBeIgnored
+ ignoreFileType common.ShouldFileBeIgnored
+ storagePath string
+ followSymlinks bool
+ gitAnnexedSize bool
+ matchesTimeFilterFn common.TimeFilter
+ archiveBrowsing bool
+}
+
+// CreateStoredAnalyzer returns Analyzer
+func CreateStoredAnalyzer(storagePath string) *StoredAnalyzer {
+ return &StoredAnalyzer{
+ storagePath: storagePath,
+ progress: &common.CurrentProgress{
+ ItemCount: 0,
+ TotalSize: int64(0),
+ },
+ progressChan: make(chan common.CurrentProgress, 1),
+ progressOutChan: make(chan common.CurrentProgress, 1),
+ progressDoneChan: make(chan struct{}),
+ doneChan: make(common.SignalGroup),
+ wait: (&WaitGroup{}).Init(),
+ }
+}
+
+// GetProgressChan returns channel for getting progress
+func (a *StoredAnalyzer) GetProgressChan() chan common.CurrentProgress {
+ return a.progressOutChan
+}
+
+// GetDone returns channel for checking when analysis is done
+func (a *StoredAnalyzer) GetDone() common.SignalGroup {
+ return a.doneChan
+}
+
+func (a *StoredAnalyzer) SetFollowSymlinks(v bool) {
+ a.followSymlinks = v
+}
+
+func (a *StoredAnalyzer) SetShowAnnexedSize(v bool) {
+ a.gitAnnexedSize = v
+}
+
+// SetTimeFilter sets the time filter function for file inclusion
+func (a *StoredAnalyzer) SetTimeFilter(matchesTimeFilterFn common.TimeFilter) {
+ a.matchesTimeFilterFn = matchesTimeFilterFn
+}
+
+// SetArchiveBrowsing sets whether browsing of zip/jar archives is enabled
+func (a *StoredAnalyzer) SetArchiveBrowsing(v bool) {
+ a.archiveBrowsing = v
+}
+
+// SetFileTypeFilter sets the file type filter function
+func (a *StoredAnalyzer) SetFileTypeFilter(filter common.ShouldFileBeIgnored) {
+ a.ignoreFileType = filter
+}
+
+// ResetProgress returns progress
+func (a *StoredAnalyzer) ResetProgress() {
+ a.progress = &common.CurrentProgress{}
+ a.progressChan = make(chan common.CurrentProgress, 1)
+ a.progressOutChan = make(chan common.CurrentProgress, 1)
+ a.progressDoneChan = make(chan struct{})
+ a.doneChan = make(common.SignalGroup)
+ a.wait = (&WaitGroup{}).Init()
+}
+
+// AnalyzeDir analyzes given path
+func (a *StoredAnalyzer) AnalyzeDir(
+ path string, ignore common.ShouldDirBeIgnored, fileTypeFilter common.ShouldFileBeIgnored,
+) fs.Item {
+ a.ignoreDir = ignore
+ a.ignoreFileType = fileTypeFilter
+
+ a.storage = NewStorage(a.storagePath, path)
+ closeFn := a.storage.Open()
+ defer func() {
+ // nasty hack to close storage after all goroutines are done
+ // Wait returns immediately if value is 0
+ // few last goroutines might still start after that
+ time.Sleep(1 * time.Second)
+ closeFn()
+ }()
+
+ a.ignoreDir = ignore
+
+ go a.updateProgress()
+ dir := a.processDir(path)
+
+ a.wait.Wait()
+
+ a.progressDoneChan <- struct{}{}
+ a.doneChan.Broadcast()
+
+ return dir
+}
+
+func (a *StoredAnalyzer) processDir(path string) *StoredDir {
+ var (
+ file fs.Item
+ err error
+ totalSize int64
+ info os.FileInfo
+ dirCount int
+ )
+
+ a.wait.Add(1)
+
+ files, err := os.ReadDir(path)
+ if err != nil {
+ log.Print(err.Error())
+ }
+
+ dir := &StoredDir{
+ Dir: &Dir{
+ File: &File{
+ Name: filepath.Base(path),
+ Flag: getDirFlag(err, len(files)),
+ },
+ BasePath: filepath.Dir(path),
+ ItemCount: 1,
+ Files: make(fs.Files, 0, len(files)),
+ },
+ }
+ parent := &ParentDir{Path: path}
+
+ setDirPlatformSpecificAttrs(dir.Dir, path)
+
+ for _, f := range files {
+ name := f.Name()
+ entryPath := filepath.Join(path, name)
+ if f.IsDir() {
+ if a.ignoreDir(name, entryPath) {
+ continue
+ }
+ dirCount++
+
+ subdir := &StoredDir{
+ &Dir{
+ File: &File{
+ Name: name,
+ },
+ BasePath: path,
+ },
+ nil,
+ sync.Mutex{},
+ }
+ dir.AddFile(subdir)
+
+ go func(entryPath string) {
+ concurrencyLimit <- struct{}{}
+ a.processDir(entryPath)
+ <-concurrencyLimit
+ }(entryPath)
+ } else {
+ info, err = f.Info()
+ if err != nil {
+ log.Print(err.Error())
+ continue
+ }
+
+ // Check if it's a zip or jar file
+ if a.archiveBrowsing && isZipFile(name) {
+ zipDir, err := processZipFile(entryPath, info)
+ if err != nil {
+ // If unable to process zip file, treat as regular file
+ log.Printf("Failed to process zip file %s: %v", entryPath, err)
+ file = &File{
+ Name: name,
+ Flag: getFlag(info),
+ Size: info.Size(),
+ Parent: parent,
+ }
+ } else {
+ // Successfully processed zip file, use zip content size
+ uncompressedSize, compressedSize, err := getZipFileSize(entryPath)
+ if err == nil {
+ zipDir.Size = uncompressedSize
+ zipDir.Usage = compressedSize
+ }
+ zipDir.Parent = parent
+ file = zipDir
+ }
+ } else {
+ file = &File{
+ Name: name,
+ Flag: getFlag(info),
+ Size: info.Size(),
+ Parent: parent,
+ }
+ }
+
+ // Apply time filter if set
+ if a.matchesTimeFilterFn != nil && !a.matchesTimeFilterFn(info.ModTime()) {
+ continue // Skip this file
+ }
+
+ // Apply file type filter if set
+ if a.ignoreFileType != nil && a.ignoreFileType(name) {
+ continue // Skip this file
+ }
+
+ if file != nil {
+ // Only set platform-specific attributes for regular files
+ if regularFile, ok := file.(*File); ok {
+ setPlatformSpecificAttrs(regularFile, info)
+ }
+ totalSize += file.GetUsage()
+ dir.AddFile(file)
+ }
+ }
+ }
+
+ err = a.storage.StoreDir(dir)
+ if err != nil {
+ log.Print(err.Error())
+ }
+
+ a.progressChan <- common.CurrentProgress{
+ CurrentItemName: path,
+ ItemCount: int64(len(files)),
+ TotalSize: totalSize,
+ }
+
+ a.wait.Done()
+ return dir
+}
+
+func (a *StoredAnalyzer) updateProgress() {
+ for {
+ select {
+ case <-a.progressDoneChan:
+ return
+ case progress := <-a.progressChan:
+ a.progress.CurrentItemName = progress.CurrentItemName
+ a.progress.ItemCount += progress.ItemCount
+ a.progress.TotalSize += progress.TotalSize
+ }
+
+ select {
+ case a.progressOutChan <- *a.progress:
+ default:
+ }
+ }
+}
+
+// StoredDir implements Dir item stored on disk
+type StoredDir struct {
+ *Dir
+ cachedFiles fs.Files
+ dbLock sync.Mutex
+}
+
+// GetParent returns parent dir
+func (f *StoredDir) GetParent() fs.Item {
+ if DefaultStorage.GetTopDir() == f.GetPath() {
+ return nil
+ }
+
+ if !DefaultStorage.IsOpen() {
+ closeFn := DefaultStorage.Open()
+ defer closeFn()
+ }
+
+ dir, err := DefaultStorage.GetDirForPath(f.BasePath)
+ if err != nil {
+ log.Print(err.Error())
+ }
+ return dir
+}
+
+// GetFiles returns files in directory as a sorted iterator
+// If files are already cached, use them
+// Otherwise load them from storage
+func (f *StoredDir) GetFiles(sortBy fs.SortBy, order fs.SortOrder) iter.Seq[fs.Item] {
+ return func(yield func(fs.Item) bool) {
+ files := f.loadFiles()
+ sortFiles(files, sortBy, order)
+
+ for _, item := range files {
+ if !yield(item) {
+ return
+ }
+ }
+ }
+}
+
+// loadFiles loads files from storage or returns cached files
+func (f *StoredDir) loadFiles() fs.Files {
+ if f.cachedFiles != nil {
+ // Return a copy to avoid modifying cached slice
+ result := make(fs.Files, len(f.cachedFiles))
+ copy(result, f.cachedFiles)
+ return result
+ }
+
+ if !DefaultStorage.IsOpen() {
+ f.dbLock.Lock()
+ defer f.dbLock.Unlock()
+ closeFn := DefaultStorage.Open()
+ defer closeFn()
+ }
+
+ var files fs.Files
+ for _, file := range f.Files {
+ if file.IsDir() {
+ dir := &StoredDir{
+ &Dir{
+ File: &File{
+ Name: file.GetName(),
+ },
+ BasePath: f.GetPath(),
+ },
+ nil,
+ sync.Mutex{},
+ }
+
+ err := DefaultStorage.LoadDir(dir)
+ if err != nil {
+ log.Print(err.Error())
+ }
+ files = append(files, dir)
+ } else {
+ files = append(files, file)
+ }
+ }
+
+ f.cachedFiles = files
+ // Return a copy to avoid modifying cached slice
+ result := make(fs.Files, len(files))
+ copy(result, files)
+ return result
+}
+
+// RemoveFile removes file from stored directory
+// It also updates size and item count of parent directories
+func (f *StoredDir) RemoveFile(item fs.Item) {
+ if !DefaultStorage.IsOpen() {
+ f.dbLock.Lock()
+ defer f.dbLock.Unlock()
+ closeFn := DefaultStorage.Open()
+ defer closeFn()
+ }
+
+ f.Files = f.Files.Remove(item)
+ f.cachedFiles = nil
+
+ cur := f
+ for {
+ cur.ItemCount -= item.GetItemCount()
+ cur.Size -= item.GetSize()
+ cur.Usage -= item.GetUsage()
+
+ err := DefaultStorage.StoreDir(cur)
+ if err != nil {
+ log.Print(err.Error())
+ }
+
+ parent := cur.GetParent()
+ if parent == nil {
+ break
+ }
+ cur = parent.(*StoredDir)
+ }
+}
+
+// GetItemStats returns item count, apparent usage and real usage of this dir
+func (f *StoredDir) GetItemStats(linkedItems fs.HardLinkedItems) (itemCount, size, usage int64) {
+ f.UpdateStats(linkedItems)
+ return f.ItemCount, f.GetSize(), f.GetUsage()
+}
+
+// UpdateStats recursively updates size and item count
+func (f *StoredDir) UpdateStats(linkedItems fs.HardLinkedItems) {
+ if !DefaultStorage.IsOpen() {
+ closeFn := DefaultStorage.Open()
+ defer closeFn()
+ }
+
+ totalSize := int64(4096)
+ totalUsage := int64(4096)
+ var itemCount int64
+ f.cachedFiles = nil
+ files := f.loadFiles()
+ for _, entry := range files {
+ count, size, usage := entry.GetItemStats(linkedItems)
+ totalSize += size
+ totalUsage += usage
+ itemCount += count
+
+ if entry.GetMtime().After(f.Mtime) {
+ f.Mtime = entry.GetMtime()
+ }
+
+ switch entry.GetFlag() {
+ case '!', '.':
+ if f.Flag != '!' {
+ f.Flag = '.'
+ }
+ }
+ }
+ f.cachedFiles = nil
+ f.ItemCount = itemCount + 1
+ f.Size = totalSize
+ f.Usage = totalUsage
+ err := DefaultStorage.StoreDir(f)
+ if err != nil {
+ log.Print(err.Error())
+ }
+}
+
+// RemoveFileByName removes file by name from stored directory
+func (f *StoredDir) RemoveFileByName(name string) {
+ if !DefaultStorage.IsOpen() {
+ f.dbLock.Lock()
+ defer f.dbLock.Unlock()
+ closeFn := DefaultStorage.Open()
+ defer closeFn()
+ }
+
+ idx, ok := f.Files.FindByName(name)
+ if !ok {
+ return
+ }
+ item := f.Files[idx]
+ f.Files = append(f.Files[:idx], f.Files[idx+1:]...)
+ f.cachedFiles = nil
+
+ cur := f
+ for {
+ cur.ItemCount -= item.GetItemCount()
+ cur.Size -= item.GetSize()
+ cur.Usage -= item.GetUsage()
+
+ err := DefaultStorage.StoreDir(cur)
+ if err != nil {
+ log.Print(err.Error())
+ }
+
+ parent := cur.GetParent()
+ if parent == nil {
+ break
+ }
+ cur = parent.(*StoredDir)
+ }
+}
+
+// ParentDir represents parent directory of single file
+// It is used to get path to parent directory of a file
+type ParentDir struct {
+ Path string
+}
+
+func (p *ParentDir) GetPath() string {
+ return p.Path
+}
+func (p *ParentDir) GetName() string { panic("must not be called") }
+func (p *ParentDir) GetFlag() rune { panic("must not be called") }
+func (p *ParentDir) IsDir() bool { panic("must not be called") }
+func (p *ParentDir) GetSize() int64 { panic("must not be called") }
+func (p *ParentDir) GetType() string { panic("must not be called") }
+func (p *ParentDir) GetUsage() int64 { panic("must not be called") }
+func (p *ParentDir) GetMtime() time.Time { panic("must not be called") }
+func (p *ParentDir) GetItemCount() int64 { panic("must not be called") }
+func (p *ParentDir) GetParent() fs.Item { panic("must not be called") }
+func (p *ParentDir) SetParent(fs.Item) { panic("must not be called") }
+func (p *ParentDir) GetMultiLinkedInode() uint64 { panic("must not be called") }
+func (p *ParentDir) EncodeJSON(writer io.Writer, topLevel bool) error { panic("must not be called") }
+func (p *ParentDir) UpdateStats(linkedItems fs.HardLinkedItems) { panic("must not be called") }
+func (p *ParentDir) AddFile(fs.Item) { panic("must not be called") }
+func (p *ParentDir) GetFiles(fs.SortBy, fs.SortOrder) iter.Seq[fs.Item] { panic("must not be called") }
+func (p *ParentDir) GetFilesLocked(fs.SortBy, fs.SortOrder) iter.Seq[fs.Item] {
+ panic("must not be called")
+}
+func (p *ParentDir) RLock() func() { panic("must not be called") }
+func (p *ParentDir) RemoveFile(item fs.Item) { panic("must not be called") }
+func (p *ParentDir) RemoveFileByName(name string) { panic("must not be called") }
+func (p *ParentDir) GetItemStats(
+ linkedItems fs.HardLinkedItems,
+) (itemCount, size, usage int64) {
+ panic("must not be called")
+}
--- /dev/null
+package analyze
+
+import (
+ "os"
+ "slices"
+ "testing"
+ "time"
+
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestStoredAnalyzerGetProgressChan(t *testing.T) {
+ analyzer := CreateStoredAnalyzer("/tmp/test")
+ progressChan := analyzer.GetProgressChan()
+ assert.NotNil(t, progressChan)
+}
+
+func TestStoredAnalyzerSetFollowSymlinks(t *testing.T) {
+ analyzer := CreateStoredAnalyzer("/tmp/test")
+ analyzer.SetFollowSymlinks(true)
+ assert.True(t, analyzer.followSymlinks)
+ analyzer.SetFollowSymlinks(false)
+ assert.False(t, analyzer.followSymlinks)
+}
+
+func TestStoredAnalyzerSetShowAnnexedSize(t *testing.T) {
+ analyzer := CreateStoredAnalyzer("/tmp/test")
+ analyzer.SetShowAnnexedSize(true)
+ assert.True(t, analyzer.gitAnnexedSize)
+ analyzer.SetShowAnnexedSize(false)
+ assert.False(t, analyzer.gitAnnexedSize)
+}
+
+func TestStoredDirGetFilesCached(t *testing.T) {
+ // Test when files are already cached
+ files := make(fs.Files, 0)
+ dir := &StoredDir{
+ Dir: &Dir{
+ File: &File{
+ Name: "test",
+ },
+ BasePath: "/test",
+ },
+ cachedFiles: files,
+ }
+
+ result := slices.Collect(dir.GetFiles(fs.SortByName, fs.SortAsc))
+ assert.Equal(t, len(files), len(result))
+}
+
+func TestStoredDirRemoveFile(t *testing.T) {
+ // Test RemoveFile functionality
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ analyzer := CreateStoredAnalyzer("/tmp/test")
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*StoredDir)
+
+ analyzer.GetDone().Wait()
+
+ // Remove a file
+ files := slices.Collect(dir.GetFiles(fs.SortByName, fs.SortAsc))
+ if len(files) > 0 {
+ dir.RemoveFile(files[0])
+ }
+}
+
+func TestStoredDirUpdateStats(t *testing.T) {
+ // Test UpdateStats functionality
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ analyzer := CreateStoredAnalyzer("/tmp/test")
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*StoredDir)
+
+ analyzer.GetDone().Wait()
+
+ dir.UpdateStats(make(fs.HardLinkedItems))
+}
+
+func TestStoredDirUpdateStatsWithMtimeUpdate(t *testing.T) {
+ // Test UpdateStats with mtime updates
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ analyzer := CreateStoredAnalyzer("/tmp/test")
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*StoredDir)
+
+ analyzer.GetDone().Wait()
+
+ // Create a file with newer mtime
+ file := &File{
+ Name: "newfile",
+ Mtime: time.Now().Add(time.Hour),
+ }
+ dir.AddFile(file)
+
+ dir.UpdateStats(make(fs.HardLinkedItems))
+}
+
+func TestStoredDirUpdateStatsWithFlagUpdate(t *testing.T) {
+ // Test UpdateStats with flag updates
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ analyzer := CreateStoredAnalyzer("/tmp/test")
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*StoredDir)
+
+ analyzer.GetDone().Wait()
+
+ // Create a file with error flag
+ file := &File{
+ Name: "errorfile",
+ Flag: '!',
+ }
+ dir.AddFile(file)
+
+ dir.UpdateStats(make(fs.HardLinkedItems))
+ // Just test that UpdateStats runs without error
+ // The flag behavior depends on the specific implementation
+}
+
+func TestStoredDirUpdateStatsWithDotFlag(t *testing.T) {
+ // Test UpdateStats with dot flag
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ analyzer := CreateStoredAnalyzer("/tmp/test")
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*StoredDir)
+
+ analyzer.GetDone().Wait()
+
+ // Create a file with dot flag
+ file := &File{
+ Name: "dotfile",
+ Flag: '.',
+ }
+ dir.AddFile(file)
+
+ dir.UpdateStats(make(fs.HardLinkedItems))
+ assert.Equal(t, '.', dir.Flag)
+}
+
+func TestStoredAnalyzerWithZip(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ // Create valid zip
+ createTestZipFile(t, "test_dir/valid.zip")
+
+ // Create invalid zip
+ f, err := os.Create("test_dir/invalid.zip")
+ assert.NoError(t, err)
+ _, err = f.WriteString("this is not a zip file")
+ assert.NoError(t, err)
+ f.Close()
+
+ analyzer := CreateStoredAnalyzer("/tmp/test")
+ analyzer.SetArchiveBrowsing(true)
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*StoredDir)
+
+ analyzer.GetDone().Wait()
+
+ // Check valid.zip
+ var validZip fs.Item
+ var invalidZip fs.Item
+
+ for _, file := range dir.Files {
+ if file.GetName() == "valid.zip" {
+ validZip = file
+ }
+ if file.GetName() == "invalid.zip" {
+ invalidZip = file
+ }
+ }
+
+ assert.NotNil(t, validZip)
+ assert.True(t, validZip.IsDir())
+ assert.Greater(t, validZip.GetSize(), int64(0))
+
+ assert.NotNil(t, invalidZip)
+ assert.False(t, invalidZip.IsDir())
+ assert.Equal(t, int64(22), invalidZip.GetSize())
+}
--- /dev/null
+package analyze
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ "slices"
+ "testing"
+
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestEncDec(t *testing.T) {
+ var d fs.Item = &StoredDir{
+ Dir: &Dir{
+ File: &File{
+ Name: "xxx",
+ },
+ BasePath: "/yyy",
+ },
+ }
+
+ b := &bytes.Buffer{}
+ enc := gob.NewEncoder(b)
+ err := enc.Encode(d)
+ assert.NoError(t, err)
+
+ var x fs.Item = &StoredDir{}
+ dec := gob.NewDecoder(b)
+ err = dec.Decode(x)
+ assert.NoError(t, err)
+
+ fmt.Println(d, x)
+ assert.Equal(t, d.GetName(), x.GetName())
+}
+
+func TestStoredAnalyzer(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ a := CreateStoredAnalyzer("/tmp/badger")
+ dir := a.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*StoredDir)
+
+ a.GetDone().Wait()
+
+ dir.UpdateStats(make(fs.HardLinkedItems))
+
+ // test dir info
+ assert.Equal(t, "test_dir", dir.Name)
+ assert.Equal(t, int64(7+4096*3), dir.Size)
+ assert.Equal(t, int64(5), dir.ItemCount)
+ assert.True(t, dir.IsDir())
+
+ // test dir tree
+ files := slices.Collect(dir.GetFiles(fs.SortByName, fs.SortAsc))
+ assert.Equal(t, "nested", files[0].GetName())
+
+ nested := files[0].(*StoredDir)
+ nestedFiles := slices.Collect(nested.GetFiles(fs.SortByName, fs.SortAsc))
+ assert.Equal(t, "subnested", nestedFiles[1].GetName())
+
+ // test file
+ assert.Equal(t, "file2", nestedFiles[0].GetName())
+ assert.Equal(t, int64(2), nestedFiles[0].GetSize())
+ assert.True(t, int64(4096) <= nestedFiles[0].GetUsage())
+
+ subnested := nestedFiles[1].(*StoredDir)
+ subnestedFiles := slices.Collect(subnested.GetFiles(fs.SortByName, fs.SortAsc))
+ assert.Equal(t, "file", subnestedFiles[0].GetName())
+ assert.Equal(t, int64(5), subnestedFiles[0].GetSize())
+}
+
+func TestRemoveStoredFile(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ a := CreateStoredAnalyzer("/tmp/badger")
+ dir := a.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*StoredDir)
+
+ a.GetDone().Wait()
+ a.ResetProgress()
+
+ dir.UpdateStats(make(fs.HardLinkedItems))
+
+ // test dir info
+ assert.Equal(t, "test_dir", dir.Name)
+ assert.Equal(t, int64(7+4096*3), dir.Size)
+ assert.Equal(t, int64(5), dir.ItemCount)
+ assert.True(t, dir.IsDir())
+
+ dirFiles := slices.Collect(dir.GetFiles(fs.SortByName, fs.SortAsc))
+ subdir := dirFiles[0].(*StoredDir)
+ subdirFiles := slices.Collect(subdir.GetFiles(fs.SortByName, fs.SortAsc))
+ subdir.RemoveFile(subdirFiles[0])
+
+ closeFn := DefaultStorage.Open()
+ defer closeFn()
+ stored, err := DefaultStorage.GetDirForPath("test_dir")
+ assert.NoError(t, err)
+
+ assert.Equal(t, int64(4), stored.GetItemCount())
+ assert.Equal(t, int64(5+4096*3), stored.GetSize())
+
+ storedFiles := slices.Collect(stored.GetFiles(fs.SortByName, fs.SortAsc))
+ storedNested := storedFiles[0].(*StoredDir)
+ storedNestedFiles := slices.Collect(storedNested.GetFiles(fs.SortByName, fs.SortAsc))
+ storedSubnested := storedNestedFiles[0].(*StoredDir)
+ storedSubnestedFiles := slices.Collect(storedSubnested.GetFiles(fs.SortByName, fs.SortAsc))
+ file := storedSubnestedFiles[0]
+ assert.Equal(t, false, file.IsDir())
+ assert.Equal(t, "file", file.GetName())
+ assert.Equal(t, "test_dir/nested/subnested", file.GetParent().GetPath())
+}
+
+func TestParentDirGetNamePanics(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Equal(t, "must not be called", r)
+ }
+ }()
+ dir := &ParentDir{}
+ dir.GetName()
+}
+
+func TestParentDirGetFlagPanics(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Equal(t, "must not be called", r)
+ }
+ }()
+ dir := &ParentDir{}
+ dir.GetFlag()
+}
+
+func TestParentDirIsDirPanics(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Equal(t, "must not be called", r)
+ }
+ }()
+ dir := &ParentDir{}
+ dir.IsDir()
+}
+
+func TestParentDirGetSizePanics(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Equal(t, "must not be called", r)
+ }
+ }()
+ dir := &ParentDir{}
+ dir.GetSize()
+}
+
+func TestParentDirGetTypePanics(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Equal(t, "must not be called", r)
+ }
+ }()
+ dir := &ParentDir{}
+ dir.GetType()
+}
+
+func TestParentDirGetUsagePanics(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Equal(t, "must not be called", r)
+ }
+ }()
+ dir := &ParentDir{}
+ dir.GetUsage()
+}
+
+func TestParentDirGetMtimePanics(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Equal(t, "must not be called", r)
+ }
+ }()
+ dir := &ParentDir{}
+ dir.GetMtime()
+}
+
+func TestParentDirGetItemCountPanics(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Equal(t, "must not be called", r)
+ }
+ }()
+ dir := &ParentDir{}
+ dir.GetItemCount()
+}
+
+func TestParentDirGetParentPanics(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Equal(t, "must not be called", r)
+ }
+ }()
+ dir := &ParentDir{}
+ dir.GetParent()
+}
+
+func TestParentDirSetParentPanics(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Equal(t, "must not be called", r)
+ }
+ }()
+ dir := &ParentDir{}
+ dir.SetParent(nil)
+}
+
+func TestParentDirGetMultiLinkedInodePanics(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Equal(t, "must not be called", r)
+ }
+ }()
+ dir := &ParentDir{}
+ dir.GetMultiLinkedInode()
+}
+
+func TestParentDirEncodeJSONPanics(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Equal(t, "must not be called", r)
+ }
+ }()
+ dir := &ParentDir{}
+ err := dir.EncodeJSON(nil, false)
+ assert.NoError(t, err)
+}
+
+func TestParentDirUpdateStatsPanics(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Equal(t, "must not be called", r)
+ }
+ }()
+ dir := &ParentDir{}
+ dir.UpdateStats(nil)
+}
+
+func TestParentDirAddFilePanics(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Equal(t, "must not be called", r)
+ }
+ }()
+ dir := &ParentDir{}
+ dir.AddFile(nil)
+}
+
+func TestParentDirGetFilesPanics(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Equal(t, "must not be called", r)
+ }
+ }()
+ dir := &ParentDir{}
+ dir.GetFiles(fs.SortByName, fs.SortAsc)
+}
+
+func TestParentDirGetFilesLockedPanics(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Equal(t, "must not be called", r)
+ }
+ }()
+ dir := &ParentDir{}
+ dir.GetFilesLocked(fs.SortByName, fs.SortAsc)
+}
+
+func TestParentDirRLockPanics(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Equal(t, "must not be called", r)
+ }
+ }()
+ dir := &ParentDir{}
+ dir.RLock()
+}
+
+func TestParentDirRemoveFilePanics(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Equal(t, "must not be called", r)
+ }
+ }()
+ dir := &ParentDir{}
+ dir.RemoveFile(nil)
+}
+
+func TestParentDirGetItemStatsPanics(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Equal(t, "must not be called", r)
+ }
+ }()
+ dir := &ParentDir{}
+ dir.GetItemStats(nil)
+}
--- /dev/null
+package analyze
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/dundee/gdu/v5/pkg/annex"
+)
+
+func followSymlink(path string, gitAnnexedSize bool) (tInfo os.FileInfo, err error) {
+ target, err := filepath.EvalSymlinks(path)
+ if err != nil {
+ target, err = os.Readlink(path)
+ if err != nil {
+ return nil, err
+ }
+ if gitAnnexedSize && strings.Contains(target, ".git/annex/objects") {
+ tInfo, err = os.Lstat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ name := filepath.Base(target)
+ tInfo = annex.AnnexedFileInfo(tInfo, name)
+ return tInfo, nil
+ }
+ }
+
+ tInfo, err = os.Lstat(target)
+ if err != nil {
+ return nil, err
+ }
+
+ if tInfo.IsDir() {
+ return nil, nil
+ }
+
+ return tInfo, nil
+}
--- /dev/null
+package analyze
+
+import (
+ "os"
+ "testing"
+
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestFollowSymlinkErr(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ err := os.Mkdir("test_dir/empty", 0o644)
+ assert.Nil(t, err)
+
+ err = os.Symlink(
+ ".git/annex/objects/qx/qX/SHA256E-s967858083--"+
+ "3e54803fded8dc3a9ea68b106f7b51e04e33c79b4a7b32a860f0b22d89af5c65.mp4/SHA256E-s967858083--"+
+ "3e54803fded8dc3a9ea68b106f7b51e04e33c79b4a7b32a860f0b22d89af5c65.mp4",
+ "test_dir/nested/file3")
+ assert.Nil(t, err)
+
+ err = os.Symlink(
+ "test_dir/nested",
+ "test_dir/some_dir")
+ assert.Nil(t, err)
+
+ _, err = followSymlink("xxx", false)
+ assert.ErrorContains(t, err, "no such file or directory")
+
+ _, err = followSymlink("test_dir/nested/file3", false)
+ assert.ErrorContains(t, err, "no such file or directory")
+
+ _, err = followSymlink("test_dir/nested/file3", true)
+ assert.NoError(t, err)
+
+ res, err := followSymlink("test_dir/some_dir", true)
+ assert.Equal(t, nil, res)
+ assert.NoError(t, err)
+}
--- /dev/null
+package analyze
+
+import (
+ "sort"
+
+ "github.com/dundee/gdu/v5/pkg/fs"
+)
+
+// TopList is a list of top largest files
+type TopList struct {
+ Items fs.Files
+ Count int
+ MinSize int64
+}
+
+// NewTopList creates new TopList
+func NewTopList(count int) *TopList {
+ return &TopList{Count: count}
+}
+
+// Add adds file to the list
+func (tl *TopList) Add(file fs.Item) {
+ if file.GetSize() > tl.MinSize || len(tl.Items) < tl.Count {
+ tl.Items = append(tl.Items, file)
+ sort.Sort(fs.ByApparentSize(tl.Items))
+ if len(tl.Items) > tl.Count {
+ tl.Items = tl.Items[1:]
+ }
+ tl.MinSize = tl.Items[0].GetSize()
+ }
+}
+
+func CollectTopFiles(dir fs.Item, count int) fs.Files {
+ topList := NewTopList(count)
+ walkDir(dir, topList)
+ sort.Sort(sort.Reverse(fs.ByApparentSize(topList.Items)))
+ return topList.Items
+}
+
+func walkDir(dir fs.Item, topList *TopList) {
+ for item := range dir.GetFiles(fs.SortBySize, fs.SortDesc) {
+ if item.IsDir() {
+ walkDir(item, topList)
+ } else {
+ topList.Add(item)
+ }
+ }
+}
--- /dev/null
+package analyze
+
+import (
+ "sort"
+ "testing"
+
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestCollectTopFiles2(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ dir := CreateAnalyzer().AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ )
+
+ topFiles := CollectTopFiles(dir, 2)
+ assert.Equal(t, 2, len(topFiles))
+ assert.Equal(t, "file", topFiles[0].GetName())
+ assert.Equal(t, int64(5), topFiles[0].GetSize())
+ assert.Equal(t, "file2", topFiles[1].GetName())
+ assert.Equal(t, int64(2), topFiles[1].GetSize())
+}
+
+func TestCollectTopFiles1(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ dir := CreateAnalyzer().AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ )
+
+ topFiles := CollectTopFiles(dir, 1)
+ assert.Equal(t, 1, len(topFiles))
+ assert.Equal(t, "file", topFiles[0].GetName())
+ assert.Equal(t, int64(5), topFiles[0].GetSize())
+}
+
+func TestAdd2(t *testing.T) {
+ topList := NewTopList(2)
+ topList.Add(&File{Size: 1, Name: "file1"})
+ topList.Add(&File{Size: 5, Name: "file5"})
+ topList.Add(&File{Size: 2, Name: "file2"})
+
+ sort.Sort(sort.Reverse(fs.ByApparentSize(topList.Items)))
+
+ assert.Equal(t, 2, len(topList.Items))
+ assert.Equal(t, "file5", topList.Items[0].GetName())
+ assert.Equal(t, "file2", topList.Items[1].GetName())
+}
+
+func TestAdd3(t *testing.T) {
+ topList := NewTopList(3)
+ topList.Add(&File{Size: 5, Name: "file5"})
+ topList.Add(&File{Size: 1, Name: "file1"})
+ topList.Add(&File{Size: 2, Name: "file2"})
+ topList.Add(&File{Size: 4, Name: "file4"})
+ topList.Add(&File{Size: 3, Name: "file3"})
+
+ sort.Sort(sort.Reverse(fs.ByApparentSize(topList.Items)))
+
+ assert.Equal(t, 3, len(topList.Items))
+ assert.Equal(t, "file5", topList.Items[0].GetName())
+ assert.Equal(t, "file4", topList.Items[1].GetName())
+ assert.Equal(t, "file3", topList.Items[2].GetName())
+}
--- /dev/null
+package analyze
+
+import "sync"
+
+// A WaitGroup waits for a collection of goroutines to finish.
+// In contrast to sync.WaitGroup Add method can be called from a goroutine.
+type WaitGroup struct {
+ wait sync.Mutex
+ value int
+ access sync.Mutex
+}
+
+// Init prepares the WaitGroup for usage, locks
+func (s *WaitGroup) Init() *WaitGroup {
+ s.wait.Lock()
+ return s
+}
+
+// Add increments value
+func (s *WaitGroup) Add(value int) {
+ s.access.Lock()
+ s.value += value
+ s.access.Unlock()
+}
+
+// Done decrements the value by one, if value is 0, lock is released
+func (s *WaitGroup) Done() {
+ s.access.Lock()
+ s.value--
+ s.check()
+ s.access.Unlock()
+}
+
+// Wait blocks until value is 0
+func (s *WaitGroup) Wait() {
+ s.access.Lock()
+ isValue := s.value > 0
+ s.access.Unlock()
+ if isValue {
+ s.wait.Lock()
+ }
+}
+
+func (s *WaitGroup) check() {
+ if s.value == 0 {
+ s.wait.TryLock()
+ s.wait.Unlock()
+ }
+}
--- /dev/null
+package analyze
+
+import (
+ "archive/zip"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/dundee/gdu/v5/pkg/fs"
+)
+
+// ZipDir represents a directory structure inside a zip file
+type ZipDir struct {
+ *Dir
+ zipPath string // path to the original zip file
+}
+
+// ZipFile represents a file inside a zip archive
+type ZipFile struct {
+ *File
+ zipPath string
+ inZipPath string // path inside the zip file
+}
+
+// GetPath returns the virtual path for zip file
+func (zf *ZipFile) GetPath() string {
+ return zf.zipPath + "/" + zf.inZipPath
+}
+
+// GetType returns type of zip file
+func (zf *ZipFile) GetType() string {
+ return "ZipFile"
+}
+
+// EncodeJSON encodes zip file to JSON
+func (zf *ZipFile) EncodeJSON(writer io.Writer, topLevel bool) error {
+ // Use the embedded File's EncodeJSON method
+ return zf.File.EncodeJSON(writer, topLevel)
+}
+
+// GetType returns type of zip directory
+func (zd *ZipDir) GetType() string {
+ return "ZipDirectory"
+}
+
+// IsDir returns true for ZipDir
+func (zd *ZipDir) IsDir() bool {
+ return true
+}
+
+// EncodeJSON encodes zip directory to JSON
+func (zd *ZipDir) EncodeJSON(writer io.Writer, topLevel bool) error {
+ // Use the embedded Dir's EncodeJSON method
+ return zd.Dir.EncodeJSON(writer, topLevel)
+}
+
+// GetPath returns the virtual path for zip directory
+func (zd *ZipDir) GetPath() string {
+ if zd.Parent != nil {
+ return filepath.Join(zd.Parent.GetPath(), zd.Name)
+ }
+ return zd.zipPath
+}
+
+// isZipFile checks if a file is a zip or jar file
+func isZipFile(filename string) bool {
+ ext := strings.ToLower(filepath.Ext(filename))
+ return ext == ".zip" || ext == ".jar"
+}
+
+// processZipFile processes a zip file and returns a ZipDir representing its contents
+func processZipFile(zipPath string, info os.FileInfo) (zipDir *ZipDir, err error) {
+ reader, err := zip.OpenReader(zipPath)
+ if err != nil {
+ return nil, err
+ }
+ defer reader.Close()
+
+ // Create root directory
+ zipDir = &ZipDir{
+ Dir: &Dir{
+ File: &File{
+ Name: filepath.Base(zipPath),
+ Flag: 'Z', // Use 'Z' to identify zip files
+ Size: info.Size(),
+ Usage: info.Size(),
+ Mtime: info.ModTime(),
+ },
+ ItemCount: 1,
+ Files: make(fs.Files, 0),
+ },
+ zipPath: zipPath,
+ }
+
+ // Use map to store directory structure
+ dirMap := make(map[string]*ZipDir)
+ dirMap[""] = zipDir // root directory
+
+ for _, f := range reader.File {
+ if f.FileInfo().IsDir() {
+ continue // Skip directory entries, we'll create them automatically based on file paths
+ }
+
+ // Parse file path and ensure all parent directories exist
+ dirPath := filepath.Dir(f.Name)
+ if dirPath == "." {
+ dirPath = "" // root directory
+ }
+ ensureZipDirExists(dirMap, dirPath, zipPath, zipDir)
+
+ // Create file item
+ parentDir := dirMap[dirPath]
+ zipFile := &ZipFile{
+ File: &File{
+ Name: filepath.Base(f.Name),
+ Flag: ' ',
+ Size: int64(f.UncompressedSize64),
+ Usage: int64(f.CompressedSize64),
+ Mtime: f.FileInfo().ModTime(),
+ Parent: parentDir,
+ },
+ zipPath: zipPath,
+ inZipPath: f.Name,
+ }
+
+ parentDir.AddFile(zipFile)
+ }
+
+ return zipDir, nil
+}
+
+// ensureZipDirExists ensures all directories in the specified path exist
+func ensureZipDirExists(dirMap map[string]*ZipDir, path, zipPath string, rootDir *ZipDir) {
+ if path == "" || path == "." {
+ return
+ }
+
+ // If directory already exists, return directly
+ if _, exists := dirMap[path]; exists {
+ return
+ }
+
+ // Ensure parent directory exists
+ parentPath := filepath.Dir(path)
+ if parentPath != "." && parentPath != "" {
+ ensureZipDirExists(dirMap, parentPath, zipPath, rootDir)
+ }
+
+ // Create current directory
+ var parent *ZipDir
+ if parentPath == "" || parentPath == "." {
+ parent = rootDir
+ } else {
+ parent = dirMap[parentPath]
+ }
+
+ newDir := &ZipDir{
+ Dir: &Dir{
+ File: &File{
+ Name: filepath.Base(path),
+ Flag: 'Z',
+ Size: 4096, // virtual directory size
+ Usage: 4096,
+ Mtime: time.Now(),
+ Parent: parent,
+ },
+ ItemCount: 1,
+ Files: make(fs.Files, 0),
+ },
+ zipPath: zipPath,
+ }
+
+ dirMap[path] = newDir
+ parent.AddFile(newDir)
+}
+
+// getZipFileSize gets the total uncompressed size of a zip file
+func getZipFileSize(zipPath string) (uncompressed, compressed int64, err error) {
+ reader, err := zip.OpenReader(zipPath)
+ if err != nil {
+ return 0, 0, err
+ }
+ defer reader.Close()
+
+ var uncompressedSize, compressedSize int64
+ for _, f := range reader.File {
+ if !f.FileInfo().IsDir() {
+ uncompressedSize += int64(f.UncompressedSize64)
+ compressedSize += int64(f.CompressedSize64)
+ }
+ }
+
+ return uncompressedSize, compressedSize, nil
+}
--- /dev/null
+package analyze
+
+import (
+ "archive/zip"
+ "bytes"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestZipFileGetPath(t *testing.T) {
+ zipFile := &ZipFile{
+ zipPath: "/path/to/archive.zip",
+ inZipPath: "folder/file.txt",
+ }
+
+ path := zipFile.GetPath()
+ assert.Equal(t, "/path/to/archive.zip/folder/file.txt", path)
+}
+
+func TestZipFileEncodeJSON(t *testing.T) {
+ zipFile := &ZipFile{
+ File: &File{
+ Name: "test.txt",
+ Size: 100,
+ },
+ zipPath: "/path/to/archive.zip",
+ inZipPath: "test.txt",
+ }
+
+ var buf bytes.Buffer
+ err := zipFile.EncodeJSON(&buf, false)
+ assert.NoError(t, err)
+ assert.NotEmpty(t, buf.String())
+}
+
+func TestZipDirEncodeJSON(t *testing.T) {
+ zipDir := &ZipDir{
+ Dir: &Dir{
+ File: &File{
+ Name: "folder",
+ },
+ },
+ zipPath: "/path/to/archive.zip",
+ }
+
+ var buf bytes.Buffer
+ err := zipDir.EncodeJSON(&buf, false)
+ assert.NoError(t, err)
+ assert.NotEmpty(t, buf.String())
+}
+
+func TestZipDirGetPathWithParent(t *testing.T) {
+ parent := &ZipDir{
+ Dir: &Dir{
+ File: &File{
+ Name: "parent",
+ },
+ },
+ zipPath: "/path/to/archive.zip",
+ }
+
+ zipDir := &ZipDir{
+ Dir: &Dir{
+ File: &File{
+ Name: "child",
+ },
+ },
+ zipPath: "/path/to/archive.zip",
+ }
+ zipDir.Parent = parent
+
+ path := zipDir.GetPath()
+ assert.Equal(t, filepath.Join(parent.GetPath(), "child"), path)
+}
+
+func TestZipDirGetPathWithoutParent(t *testing.T) {
+ zipDir := &ZipDir{
+ Dir: &Dir{
+ File: &File{
+ Name: "root",
+ },
+ },
+ zipPath: "/path/to/archive.zip",
+ }
+
+ path := zipDir.GetPath()
+ assert.Equal(t, "/path/to/archive.zip", path)
+}
+
+func TestProcessZipFileWithEmptyZip(t *testing.T) {
+ // Create a temporary zip file
+ zipPath := "/tmp/empty.zip"
+ defer os.Remove(zipPath)
+
+ // Create an empty zip file
+ file, err := os.Create(zipPath)
+ assert.NoError(t, err)
+ file.Close()
+
+ // Create a zip file with no entries
+ zipFile, err := os.Create(zipPath)
+ assert.NoError(t, err)
+ writer := zip.NewWriter(zipFile)
+ writer.Close()
+ zipFile.Close()
+
+ info, err := os.Stat(zipPath)
+ assert.NoError(t, err)
+
+ zipDir, err := processZipFile(zipPath, info)
+ assert.NoError(t, err)
+ assert.NotNil(t, zipDir)
+ assert.Equal(t, "empty.zip", zipDir.Name)
+ assert.Equal(t, 'Z', zipDir.Flag)
+}
+
+func TestProcessZipFileWithDirectoryEntries(t *testing.T) {
+ // Create a temporary zip file
+ zipPath := "/tmp/dir_entries.zip"
+ defer os.Remove(zipPath)
+
+ // Create a zip file with directory entries
+ zipFile, err := os.Create(zipPath)
+ assert.NoError(t, err)
+ writer := zip.NewWriter(zipFile)
+
+ // Add a directory entry
+ _, err = writer.Create("folder/")
+ assert.NoError(t, err)
+
+ // Add a file in the directory
+ fileWriter, err := writer.Create("folder/file.txt")
+ assert.NoError(t, err)
+ fileWriter.Write([]byte("test content"))
+
+ writer.Close()
+ zipFile.Close()
+
+ info, err := os.Stat(zipPath)
+ assert.NoError(t, err)
+
+ zipDir, err := processZipFile(zipPath, info)
+ assert.NoError(t, err)
+ assert.NotNil(t, zipDir)
+ assert.Equal(t, "dir_entries.zip", zipDir.Name)
+}
+
+func TestProcessZipFileWithNestedDirectories(t *testing.T) {
+ // Create a temporary zip file
+ zipPath := "/tmp/nested.zip"
+ defer os.Remove(zipPath)
+
+ // Create a zip file with nested directories
+ zipFile, err := os.Create(zipPath)
+ assert.NoError(t, err)
+ writer := zip.NewWriter(zipFile)
+
+ // Add files in nested directories
+ fileWriter, err := writer.Create("level1/level2/file.txt")
+ assert.NoError(t, err)
+ fileWriter.Write([]byte("nested content"))
+
+ writer.Close()
+ zipFile.Close()
+
+ info, err := os.Stat(zipPath)
+ assert.NoError(t, err)
+
+ zipDir, err := processZipFile(zipPath, info)
+ assert.NoError(t, err)
+ assert.NotNil(t, zipDir)
+ assert.Equal(t, "nested.zip", zipDir.Name)
+}
+
+func TestProcessZipFileWithRootFiles(t *testing.T) {
+ // Create a temporary zip file
+ zipPath := "/tmp/root_files.zip"
+ defer os.Remove(zipPath)
+
+ // Create a zip file with files in root
+ zipFile, err := os.Create(zipPath)
+ assert.NoError(t, err)
+ writer := zip.NewWriter(zipFile)
+
+ // Add files in root directory
+ fileWriter, err := writer.Create("file1.txt")
+ assert.NoError(t, err)
+ fileWriter.Write([]byte("file1 content"))
+
+ fileWriter, err = writer.Create("file2.txt")
+ assert.NoError(t, err)
+ fileWriter.Write([]byte("file2 content"))
+
+ writer.Close()
+ zipFile.Close()
+
+ info, err := os.Stat(zipPath)
+ assert.NoError(t, err)
+
+ zipDir, err := processZipFile(zipPath, info)
+ assert.NoError(t, err)
+ assert.NotNil(t, zipDir)
+ assert.Equal(t, "root_files.zip", zipDir.Name)
+}
+
+func TestProcessZipFileError(t *testing.T) {
+ // Test with non-existent file
+ zipDir, err := processZipFile("/non/existent/file.zip", nil)
+ assert.Error(t, err)
+ assert.Nil(t, zipDir)
+}
+
+func TestGetZipFileSizeWithEmptyZip(t *testing.T) {
+ // Create a temporary zip file
+ zipPath := "/tmp/empty_size.zip"
+ defer os.Remove(zipPath)
+
+ // Create an empty zip file
+ zipFile, err := os.Create(zipPath)
+ assert.NoError(t, err)
+ writer := zip.NewWriter(zipFile)
+ writer.Close()
+ zipFile.Close()
+
+ uncompressed, compressed, err := getZipFileSize(zipPath)
+ assert.NoError(t, err)
+ assert.Equal(t, int64(0), uncompressed)
+ assert.Equal(t, int64(0), compressed)
+}
+
+func TestGetZipFileSizeWithFiles(t *testing.T) {
+ // Create a temporary zip file
+ zipPath := "/tmp/size_test.zip"
+ defer os.Remove(zipPath)
+
+ // Create a zip file with files
+ zipFile, err := os.Create(zipPath)
+ assert.NoError(t, err)
+ writer := zip.NewWriter(zipFile)
+
+ // Add a file
+ fileWriter, err := writer.Create("test.txt")
+ assert.NoError(t, err)
+ fileWriter.Write([]byte("test content"))
+
+ writer.Close()
+ zipFile.Close()
+
+ uncompressed, compressed, err := getZipFileSize(zipPath)
+ assert.NoError(t, err)
+ assert.Greater(t, uncompressed, int64(0))
+ assert.Greater(t, compressed, int64(0))
+}
+
+func TestGetZipFileSizeWithDirectories(t *testing.T) {
+ // Create a temporary zip file
+ zipPath := "/tmp/dir_size.zip"
+ defer os.Remove(zipPath)
+
+ // Create a zip file with directories
+ zipFile, err := os.Create(zipPath)
+ assert.NoError(t, err)
+ writer := zip.NewWriter(zipFile)
+
+ // Add a directory entry (should be ignored)
+ _, err = writer.Create("folder/")
+ assert.NoError(t, err)
+
+ // Add a file
+ fileWriter, err := writer.Create("file.txt")
+ assert.NoError(t, err)
+ fileWriter.Write([]byte("test content"))
+
+ writer.Close()
+ zipFile.Close()
+
+ uncompressed, compressed, err := getZipFileSize(zipPath)
+ assert.NoError(t, err)
+ assert.Greater(t, uncompressed, int64(0))
+ assert.Greater(t, compressed, int64(0))
+}
+
+func TestGetZipFileSizeError(t *testing.T) {
+ // Test with non-existent file
+ uncompressed, compressed, err := getZipFileSize("/non/existent/file.zip")
+ assert.Error(t, err)
+ assert.Equal(t, int64(0), uncompressed)
+ assert.Equal(t, int64(0), compressed)
+}
+
+func TestEnsureZipDirExistsWithEmptyPath(t *testing.T) {
+ dirMap := make(map[string]*ZipDir)
+ rootDir := &ZipDir{
+ Dir: &Dir{
+ File: &File{
+ Name: "root",
+ },
+ },
+ zipPath: "/test.zip",
+ }
+
+ ensureZipDirExists(dirMap, "", "/test.zip", rootDir)
+ // Should not create any new directories for empty path
+ assert.Len(t, dirMap, 0)
+}
+
+func TestEnsureZipDirExistsWithDotPath(t *testing.T) {
+ dirMap := make(map[string]*ZipDir)
+ rootDir := &ZipDir{
+ Dir: &Dir{
+ File: &File{
+ Name: "root",
+ },
+ },
+ zipPath: "/test.zip",
+ }
+
+ ensureZipDirExists(dirMap, ".", "/test.zip", rootDir)
+ // Should not create any new directories for dot path
+ assert.Len(t, dirMap, 0)
+}
+
+func TestEnsureZipDirExistsWithExistingPath(t *testing.T) {
+ dirMap := make(map[string]*ZipDir)
+ existingDir := &ZipDir{
+ Dir: &Dir{
+ File: &File{
+ Name: "existing",
+ },
+ },
+ zipPath: "/test.zip",
+ }
+ dirMap["existing"] = existingDir
+
+ rootDir := &ZipDir{
+ Dir: &Dir{
+ File: &File{
+ Name: "root",
+ },
+ },
+ zipPath: "/test.zip",
+ }
+
+ ensureZipDirExists(dirMap, "existing", "/test.zip", rootDir)
+ // Should not create a new directory for existing path
+ assert.Len(t, dirMap, 1)
+ assert.Equal(t, existingDir, dirMap["existing"])
+}
+
+func TestEnsureZipDirExistsWithNestedPath(t *testing.T) {
+ dirMap := make(map[string]*ZipDir)
+ rootDir := &ZipDir{
+ Dir: &Dir{
+ File: &File{
+ Name: "root",
+ },
+ },
+ zipPath: "/test.zip",
+ }
+ dirMap[""] = rootDir
+
+ ensureZipDirExists(dirMap, "level1/level2", "/test.zip", rootDir)
+
+ // Should create both level1 and level1/level2 directories
+ assert.Contains(t, dirMap, "level1")
+ assert.Contains(t, dirMap, "level1/level2")
+ assert.Equal(t, "level1", dirMap["level1"].Name)
+ assert.Equal(t, "level2", dirMap["level1/level2"].Name)
+}
+
+func TestIsZipFileFunction(t *testing.T) {
+ assert.True(t, isZipFile("test.zip"))
+ assert.True(t, isZipFile("test.ZIP"))
+ assert.True(t, isZipFile("test.jar"))
+ assert.True(t, isZipFile("test.JAR"))
+ assert.False(t, isZipFile("test.txt"))
+ assert.False(t, isZipFile("test.tar"))
+ assert.False(t, isZipFile("test.gz"))
+}
--- /dev/null
+package analyze
+
+import (
+ "archive/zip"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestSequentialAnalyzerWithZipFile(t *testing.T) {
+ // Create temporary directory and zip file
+ tempDir := t.TempDir()
+ zipPath := filepath.Join(tempDir, "test.zip")
+
+ // Create test zip file
+ createTestZipFile(t, zipPath)
+
+ // Create analyzer
+ analyzer := CreateSeqAnalyzer()
+ analyzer.SetArchiveBrowsing(true)
+
+ // Analyze directory (containing zip file)
+ result := analyzer.AnalyzeDir(tempDir, func(string, string) bool { return false }, func(string) bool { return false })
+
+ // Verify result
+ assert.NotNil(t, result)
+ assert.True(t, result.IsDir())
+
+ // Find zip file
+ var zipItem fs.Item
+ for file := range result.GetFiles(fs.SortByName, fs.SortAsc) {
+ if file.GetName() == "test.zip" {
+ zipItem = file
+ break
+ }
+ }
+
+ assert.NotNil(t, zipItem, "should find zip file")
+ assert.True(t, zipItem.IsDir(), "zip file should be treated as directory")
+
+ // Verify zip file content
+ zipFilesCount := 0
+ foundTextFile := false
+ for file := range zipItem.GetFiles(fs.SortByName, fs.SortAsc) {
+ zipFilesCount++
+ if file.GetName() == "test.txt" {
+ foundTextFile = true
+ assert.False(t, file.IsDir())
+ }
+ }
+ assert.Greater(t, zipFilesCount, 0, "zip file should contain content")
+ assert.True(t, foundTextFile, "should find test.txt in zip file")
+}
+
+func TestParallelAnalyzerWithZipFile(t *testing.T) {
+ // Create temporary directory and zip file
+ tempDir := t.TempDir()
+ zipPath := filepath.Join(tempDir, "test.jar") // test jar file
+
+ // Create test jar file (actually a zip file)
+ createTestZipFile(t, zipPath)
+
+ // Create parallel analyzer
+ analyzer := CreateAnalyzer()
+ analyzer.SetArchiveBrowsing(true)
+
+ // Analyze directory
+ result := analyzer.AnalyzeDir(tempDir, func(string, string) bool { return false }, func(string) bool { return false })
+
+ // Verify result
+ assert.NotNil(t, result)
+ assert.True(t, result.IsDir())
+
+ // Find jar file
+ var jarItem fs.Item
+ for file := range result.GetFiles(fs.SortByName, fs.SortAsc) {
+ if file.GetName() == "test.jar" {
+ jarItem = file
+ break
+ }
+ }
+
+ assert.NotNil(t, jarItem, "should find jar file")
+ assert.True(t, jarItem.IsDir(), "jar file should be treated as directory")
+
+ // Verify jar file content
+ jarFilesCount := 0
+ for range jarItem.GetFiles(fs.SortByName, fs.SortAsc) {
+ jarFilesCount++
+ }
+ assert.Greater(t, jarFilesCount, 0, "jar file should contain content")
+}
+
+func TestZipFileWithNestedStructure(t *testing.T) {
+ // Create temporary directory
+ tempDir := t.TempDir()
+ zipPath := filepath.Join(tempDir, "nested.zip")
+
+ // Create zip file with complex nested structure
+ createComplexZipFile(t, zipPath)
+
+ // Create analyzer
+ analyzer := CreateSeqAnalyzer()
+ analyzer.SetArchiveBrowsing(true)
+
+ // Analyze directory
+ result := analyzer.AnalyzeDir(tempDir, func(string, string) bool { return false }, func(string) bool { return false })
+
+ // Find zip file
+ var zipItem fs.Item
+ for file := range result.GetFiles(fs.SortByName, fs.SortAsc) {
+ if file.GetName() == "nested.zip" {
+ zipItem = file
+ break
+ }
+ }
+
+ assert.NotNil(t, zipItem)
+
+ // Find deeply nested directory
+ var level1Dir fs.Item
+ for file := range zipItem.GetFiles(fs.SortByName, fs.SortAsc) {
+ if file.GetName() == "level1" && file.IsDir() {
+ level1Dir = file
+ break
+ }
+ }
+ assert.NotNil(t, level1Dir, "should find level1 directory")
+
+ // Find level2 directory
+ var level2Dir fs.Item
+ for file := range level1Dir.GetFiles(fs.SortByName, fs.SortAsc) {
+ if file.GetName() == "level2" && file.IsDir() {
+ level2Dir = file
+ break
+ }
+ }
+ assert.NotNil(t, level2Dir, "should find level2 directory")
+
+ // Find deepest nested file
+ foundDeepFile := false
+ for file := range level2Dir.GetFiles(fs.SortByName, fs.SortAsc) {
+ if file.GetName() == "deep.txt" {
+ foundDeepFile = true
+ break
+ }
+ }
+ assert.True(t, foundDeepFile, "should find deeply nested file")
+}
+
+// createComplexZipFile creates a zip file with complex nested structure
+func createComplexZipFile(t *testing.T, zipPath string) {
+ file, err := os.Create(zipPath)
+ assert.NoError(t, err)
+ defer file.Close()
+
+ zipWriter := zip.NewWriter(file)
+ defer zipWriter.Close()
+
+ // Create multi-level nested structure
+ files := []struct {
+ name string
+ content string
+ }{
+ {"root.txt", "Root level file"},
+ {"level1/file1.txt", "Level 1 file"},
+ {"level1/level2/file2.txt", "Level 2 file"},
+ {"level1/level2/deep.txt", "Deep nested file"},
+ {"level1/level2/level3/file3.txt", "Level 3 file"},
+ {"another/path/file.txt", "Another path file"},
+ }
+
+ for _, f := range files {
+ writer, err := zipWriter.Create(f.name)
+ assert.NoError(t, err)
+ _, err = writer.Write([]byte(f.content))
+ assert.NoError(t, err)
+ }
+}
--- /dev/null
+package analyze
+
+import (
+ "archive/zip"
+ "os"
+ "path/filepath"
+ "slices"
+ "testing"
+
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestIsZipFile(t *testing.T) {
+ tests := []struct {
+ filename string
+ expected bool
+ }{
+ {"test.zip", true},
+ {"test.jar", true},
+ {"TEST.ZIP", true},
+ {"TEST.JAR", true},
+ {"test.txt", false},
+ {"test.tar.gz", false},
+ {"test", false},
+ {"", false},
+ }
+
+ for _, test := range tests {
+ result := isZipFile(test.filename)
+ assert.Equal(t, test.expected, result, "filename: %s", test.filename)
+ }
+}
+
+func TestProcessZipFile(t *testing.T) {
+ // Create temporary zip file
+ tempDir := t.TempDir()
+ zipPath := filepath.Join(tempDir, "test.zip")
+
+ // Create zip file
+ createTestZipFile(t, zipPath)
+
+ // Get file info
+ info, err := os.Stat(zipPath)
+ assert.NoError(t, err)
+
+ // Process zip file
+ zipDir, err := processZipFile(zipPath, info)
+ assert.NoError(t, err)
+ assert.NotNil(t, zipDir)
+
+ // Verify zip directory properties
+ assert.Equal(t, "test.zip", zipDir.GetName(), "Name must include extension")
+ assert.Equal(t, rune('Z'), zipDir.GetFlag())
+ assert.True(t, zipDir.IsDir())
+ assert.Equal(t, "ZipDirectory", zipDir.GetType())
+
+ // Verify file structure
+ files := slices.Collect(zipDir.GetFiles(fs.SortByName, fs.SortAsc))
+ assert.Greater(t, len(files), 0)
+
+ // Debug: print all files
+ t.Logf("Found %d files in zip:", len(files))
+ for _, file := range files {
+ t.Logf(" - %s (isDir: %t, type: %s)", file.GetName(), file.IsDir(), file.GetType())
+ }
+
+ // Find files
+ foundTextFile := false
+ foundSubdir := false
+
+ for _, file := range files {
+ if file.GetName() == "test.txt" {
+ foundTextFile = true
+ assert.False(t, file.IsDir())
+ assert.Equal(t, "ZipFile", file.GetType())
+ }
+ if file.GetName() == "subdir" {
+ foundSubdir = true
+ assert.True(t, file.IsDir())
+ assert.Equal(t, "ZipDirectory", file.GetType())
+ }
+ }
+
+ assert.True(t, foundTextFile, "should find test.txt file")
+ assert.True(t, foundSubdir, "should find subdir directory")
+}
+
+func TestGetZipFileSize(t *testing.T) {
+ // Create temporary zip file
+ tempDir := t.TempDir()
+ zipPath := filepath.Join(tempDir, "test.zip")
+
+ // Create zip file
+ createTestZipFile(t, zipPath)
+
+ // Get size
+ uncompressed, compressed, err := getZipFileSize(zipPath)
+ assert.NoError(t, err)
+ assert.Greater(t, uncompressed, int64(0))
+ assert.Greater(t, compressed, int64(0))
+ // Note: for small files, compressed size might be larger
+ t.Logf("Uncompressed size: %d, Compressed size: %d", uncompressed, compressed)
+}
+
+func TestEnsureZipDirExists(t *testing.T) {
+ tempDir := t.TempDir()
+ zipPath := filepath.Join(tempDir, "test.zip")
+
+ // Create root directory
+ rootDir := &ZipDir{
+ Dir: &Dir{
+ File: &File{
+ Name: "test.zip",
+ Flag: 'Z',
+ },
+ Files: make(fs.Files, 0),
+ },
+ zipPath: zipPath,
+ }
+
+ dirMap := make(map[string]*ZipDir)
+ dirMap[""] = rootDir
+
+ // Ensure nested directory structure is created
+ ensureZipDirExists(dirMap, "dir1/dir2/dir3", zipPath, rootDir)
+
+ // Verify directory structure
+ assert.Contains(t, dirMap, "dir1")
+ assert.Contains(t, dirMap, "dir1/dir2")
+ assert.Contains(t, dirMap, "dir1/dir2/dir3")
+
+ // Verify parent-child relationships
+ dir1 := dirMap["dir1"]
+ assert.Equal(t, rootDir, dir1.GetParent())
+
+ dir2 := dirMap["dir1/dir2"]
+ assert.Equal(t, dir1, dir2.GetParent())
+
+ dir3 := dirMap["dir1/dir2/dir3"]
+ assert.Equal(t, dir2, dir3.GetParent())
+}
+
+// createTestZipFile creates a test zip file
+func createTestZipFile(t *testing.T, zipPath string) {
+ file, err := os.Create(zipPath)
+ assert.NoError(t, err)
+ defer file.Close()
+
+ zipWriter := zip.NewWriter(file)
+ defer zipWriter.Close()
+
+ // Add root directory file
+ writer, err := zipWriter.Create("test.txt")
+ assert.NoError(t, err)
+ _, err = writer.Write([]byte("Hello, this is a test file!"))
+ assert.NoError(t, err)
+
+ // Add subdirectory files
+ // We don't need to use the writer for the directory entry, avoid SA4006
+ _, err = zipWriter.Create("subdir/")
+ assert.NoError(t, err)
+
+ writer, err = zipWriter.Create("subdir/nested.txt")
+ assert.NoError(t, err)
+ _, err = writer.Write([]byte("This is a nested file."))
+ assert.NoError(t, err)
+
+ // Add deeper directory structure
+ writer, err = zipWriter.Create("dir1/dir2/deep.txt")
+ assert.NoError(t, err)
+ _, err = writer.Write([]byte("Deep nested file content."))
+ assert.NoError(t, err)
+}
--- /dev/null
+package annex
+
+import (
+ "fmt"
+ "io/fs"
+ "log"
+ "strconv"
+ "strings"
+)
+
+// SizeFromKey returns size from git-annex key.
+func SizeFromKey(name string) (size int64, err error) {
+ nameParts := strings.SplitN(name, "--", 2)
+ backendKVs := nameParts[0]
+ backendKVParts := strings.Split(backendKVs, "-")
+
+ if len(backendKVParts) < 2 {
+ return 0, fmt.Errorf("key is is missing backend")
+ }
+
+ for _, p := range backendKVParts[1:] {
+ if p == "" || p[0] != 's' {
+ continue
+ }
+
+ size, err = strconv.ParseInt(p[1:], 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("failed to parse size: %w", err)
+ }
+
+ return size, nil
+ }
+
+ return 0, fmt.Errorf("size not found in key")
+}
+
+// AnnexedFileInfo returns a new FileInfo with size from git-annex key.
+func AnnexedFileInfo(fi fs.FileInfo, name string) *FileInfo {
+ size, err := SizeFromKey(name)
+ if err != nil {
+ log.Print(err.Error())
+ return &FileInfo{FileInfo: fi}
+ }
+
+ afi := &FileInfo{
+ FileInfo: fi,
+ size: size,
+ }
+
+ return afi
+}
+
+var _ fs.FileInfo = (*FileInfo)(nil)
+
+// FileInfo is a wrapper around fs.FileInfo to overwrite the size.
+type FileInfo struct {
+ fs.FileInfo
+
+ size int64
+}
+
+// Length in bytes for regular files; system-dependent for others
+func (fi *FileInfo) Size() int64 {
+ return int64(fi.size)
+}
--- /dev/null
+package annex
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestAnnexedFileInfo(t *testing.T) {
+ fi := &FileInfo{}
+ fi = AnnexedFileInfo(fi, "SHA256E-s967858083--3e54803fded8dc3a9ea68b106f7b51e04e33c79b4a7b32a860f0b22d89af5c65.mp4")
+
+ assert.Equal(t, int64(967858083), fi.Size())
+}
+
+func TestAnnexedFileInfoErr(t *testing.T) {
+ fi := &FileInfo{}
+ fi = AnnexedFileInfo(fi, "xxx")
+
+ assert.Equal(t, int64(0), fi.Size())
+}
+
+func TestSizeFromKeyErr(t *testing.T) {
+ _, err := SizeFromKey("xxx")
+ assert.Error(t, err)
+ assert.ErrorContains(t, err, "key is is missing backend")
+
+ _, err = SizeFromKey("SHA256E-sXXX--3e54803fded8dc3a9ea68b106f7b51e04e33c79b4a7b32a860f0b22d89af5c65.mp4")
+ assert.Error(t, err)
+ assert.ErrorContains(t, err, "failed to parse size")
+
+ _, err = SizeFromKey("SHA256E-s--3e54803fded8dc3a9ea68b106f7b51e04e33c79b4a7b32a860f0b22d89af5c65.mp4")
+ assert.Error(t, err)
+ assert.ErrorContains(t, err, "failed to parse size")
+
+ _, err = SizeFromKey("SHA256E-a-b-c--3e54803fded8dc3a9ea68b106f7b51e04e33c79b4a7b32a860f0b22d89af5c65.mp4")
+ assert.Error(t, err)
+ assert.ErrorContains(t, err, "size not found in key")
+}
--- /dev/null
+package device
+
+import "strings"
+
+// Device struct
+type Device struct {
+ Name string
+ MountPoint string
+ Fstype string
+ Size int64
+ Free int64
+}
+
+// GetUsage returns used size of device
+func (d Device) GetUsage() int64 {
+ return d.Size - d.Free
+}
+
+// DevicesInfoGetter is type for GetDevicesInfo function
+type DevicesInfoGetter interface {
+ GetMounts() (Devices, error)
+ GetDevicesInfo() (Devices, error)
+}
+
+// Devices if slice of Device items
+type Devices []*Device
+
+// ByUsedSize sorts devices by used size
+type ByUsedSize Devices
+
+func (f ByUsedSize) Len() int { return len(f) }
+func (f ByUsedSize) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+func (f ByUsedSize) Less(i, j int) bool {
+ return f[i].GetUsage() < f[j].GetUsage()
+}
+
+// ByName sorts devices by device name
+type ByName Devices
+
+func (f ByName) Len() int { return len(f) }
+func (f ByName) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+func (f ByName) Less(i, j int) bool {
+ return f[i].Name < f[j].Name
+}
+
+// GetNestedMountpointsPaths returns paths of nested mount points
+func GetNestedMountpointsPaths(path string, mounts Devices) []string {
+ paths := make([]string, 0, len(mounts))
+
+ for _, mount := range mounts {
+ if strings.HasPrefix(mount.MountPoint, path) && mount.MountPoint != path {
+ paths = append(paths, mount.MountPoint)
+ }
+ }
+ return paths
+}
--- /dev/null
+//go:build netbsd || openbsd
+
+package device
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "io"
+ "os/exec"
+ "regexp"
+ "strings"
+)
+
+// BSDDevicesInfoGetter returns info for Darwin devices
+type BSDDevicesInfoGetter struct {
+ MountCmd string
+}
+
+// Getter is current instance of DevicesInfoGetter
+var Getter DevicesInfoGetter = BSDDevicesInfoGetter{MountCmd: "/sbin/mount"}
+
+// GetMounts returns all mounted filesystems from output of /sbin/mount
+func (t BSDDevicesInfoGetter) GetMounts() (devices Devices, err error) {
+ out, err := exec.Command(t.MountCmd).Output()
+ if err != nil {
+ return nil, err
+ }
+
+ rdr := bytes.NewReader(out)
+
+ return readMountOutput(rdr)
+}
+
+// GetDevicesInfo returns result of GetMounts with usage info about mounted devices (by calling Statfs syscall)
+func (t BSDDevicesInfoGetter) GetDevicesInfo() (devices Devices, err error) {
+ mounts, err := t.GetMounts()
+ if err != nil {
+ return nil, err
+ }
+
+ return processMounts(mounts, false)
+}
+
+func readMountOutput(rdr io.Reader) (mounts Devices, err error) {
+ scanner := bufio.NewScanner(rdr)
+ for scanner.Scan() {
+ line := scanner.Text()
+
+ re := regexp.MustCompile("^(.*) on (/.*) type (.*) \\(([^)]+)\\)$")
+ parts := re.FindAllStringSubmatch(line, -1)
+
+ if len(parts) < 1 {
+ return nil, errors.New("Cannot parse mount output")
+ }
+
+ fstype := strings.TrimSpace(strings.Split(parts[0][3], ",")[0])
+
+ device := &Device{
+ Name: parts[0][1],
+ MountPoint: parts[0][2],
+ Fstype: fstype,
+ }
+ mounts = append(mounts, device)
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return mounts, nil
+}
--- /dev/null
+//go:build freebsd || openbsd || netbsd || darwin
+
+package device
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestGetDevicesInfo(t *testing.T) {
+ getter := BSDDevicesInfoGetter{MountCmd: "/sbin/mount"}
+ devices, _ := getter.GetDevicesInfo()
+ assert.IsType(t, Devices{}, devices)
+}
+
+func TestGetDevicesInfoFail(t *testing.T) {
+ getter := BSDDevicesInfoGetter{MountCmd: "/nonexistent"}
+ _, err := getter.GetDevicesInfo()
+ assert.Equal(t, "fork/exec /nonexistent: no such file or directory", err.Error())
+}
--- /dev/null
+//go:build freebsd || darwin
+
+package device
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "io"
+ "os/exec"
+ "regexp"
+ "strings"
+
+ "golang.org/x/sys/unix"
+)
+
+// BSDDevicesInfoGetter returns info for Darwin devices
+type BSDDevicesInfoGetter struct {
+ MountCmd string
+}
+
+// Getter is current instance of DevicesInfoGetter
+var Getter DevicesInfoGetter = BSDDevicesInfoGetter{MountCmd: "/sbin/mount"}
+
+// GetMounts returns all mounted filesystems from output of /sbin/mount
+func (t BSDDevicesInfoGetter) GetMounts() (devices Devices, err error) {
+ var out []byte
+ out, err = exec.Command(t.MountCmd).Output()
+ if err != nil {
+ return nil, err
+ }
+
+ rdr := bytes.NewReader(out)
+
+ return readMountOutput(rdr)
+}
+
+// GetDevicesInfo returns result of GetMounts with usage info about mounted devices (by calling Statfs syscall)
+func (t BSDDevicesInfoGetter) GetDevicesInfo() (devices Devices, err error) {
+ var mounts Devices
+ mounts, err = t.GetMounts()
+ if err != nil {
+ return nil, err
+ }
+
+ return processMounts(mounts, false)
+}
+
+func readMountOutput(rdr io.Reader) (mounts Devices, err error) {
+ scanner := bufio.NewScanner(rdr)
+ for scanner.Scan() {
+ line := scanner.Text()
+
+ re := regexp.MustCompile(`^(.*) on (/.*) \(([^)]+)\)$`)
+ parts := re.FindAllStringSubmatch(line, -1)
+
+ if len(parts) < 1 {
+ return nil, errors.New("cannot parse mount output")
+ }
+
+ fstype := strings.TrimSpace(strings.Split(parts[0][3], ",")[0])
+
+ device := &Device{
+ Name: parts[0][1],
+ MountPoint: parts[0][2],
+ Fstype: fstype,
+ }
+ mounts = append(mounts, device)
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return mounts, nil
+}
+
+func processMounts(mounts Devices, ignoreErrors bool) (devices Devices, err error) {
+ for _, mount := range mounts {
+ if !strings.HasPrefix(mount.Name, "/dev") && mount.Fstype != "zfs" {
+ continue
+ }
+
+ info := &unix.Statfs_t{}
+ err := unix.Statfs(mount.MountPoint, info)
+ if err != nil && !ignoreErrors {
+ return nil, err
+ }
+
+ mount.Size = int64(info.Bsize) * int64(info.Blocks)
+ mount.Free = int64(info.Bsize) * int64(info.Bavail)
+
+ devices = append(devices, mount)
+ }
+
+ return devices, nil
+}
--- /dev/null
+//go:build freebsd || darwin
+
+package device
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestZfsMountsShown(t *testing.T) {
+ mounts, _ := readMountOutput(strings.NewReader(`/dev/ada0p2 on / (ufs, local, soft-updates)
+devfs on /dev (devfs)
+tmpfs on /tmp (tmpfs, local)
+fdescfs on /dev/fd (fdescfs)
+procfs on /proc (procfs, local)
+t on /t (zfs, local, nfsv4acls)
+t/db on /t/db (zfs, local, nfsv4acls)
+t/vm on /t/vm (zfs, local, nfsv4acls)
+t/log/pflog on /var/log/pflog (zfs, local, nfsv4acls)
+t/log on /t/log (zfs, local, nfsv4acls)
+devfs on /compat/linux/dev (devfs)
+fdescfs on /compat/linux/dev/fd (fdescfs)
+tmpfs on /compat/linux/dev/shm (tmpfs, local)
+map -hosts on /net (autofs)
+argon:/usr/src on /usr/src (nfs)
+argon:/usr/obj on /usr/obj (nfs)`))
+
+ devices, err := processMounts(mounts, true)
+ assert.Len(t, devices, 6)
+ assert.Nil(t, err)
+}
+
+func TestMountsWithSpace(t *testing.T) {
+ mounts, err := readMountOutput(strings.NewReader(
+ `//inglor@vault.lan/volatile on /Users/inglor/Mountpoints/volatile (vault.lan) (smbfs, nodev, nosuid, mounted by inglor)`,
+ ))
+ assert.Equal(t, "//inglor@vault.lan/volatile", mounts[0].Name)
+ assert.Equal(t, "/Users/inglor/Mountpoints/volatile (vault.lan)", mounts[0].MountPoint)
+ assert.Equal(t, "smbfs", mounts[0].Fstype)
+ assert.Nil(t, err)
+}
--- /dev/null
+package device
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "golang.org/x/sys/unix"
+)
+
+// LinuxDevicesInfoGetter returns info for Linux devices
+type LinuxDevicesInfoGetter struct {
+ MountsPath string
+}
+
+// Getter is current instance of DevicesInfoGetter
+var Getter DevicesInfoGetter = LinuxDevicesInfoGetter{MountsPath: "/proc/mounts"}
+
+// GetMounts returns all mounted filesystems from /proc/mounts
+func (t LinuxDevicesInfoGetter) GetMounts() (devices Devices, err error) {
+ file, err := os.Open(t.MountsPath)
+ if err != nil {
+ return nil, err
+ }
+
+ devices, err = readMountsFile(file)
+ if err != nil {
+ if cerr := file.Close(); cerr != nil {
+ return nil, fmt.Errorf("%w; %s", err, cerr.Error())
+ }
+ return nil, err
+ }
+ if err := file.Close(); err != nil {
+ return nil, err
+ }
+ return devices, nil
+}
+
+// GetDevicesInfo returns result of GetMounts with usage info about mounted devices (by calling Statfs syscall)
+func (t LinuxDevicesInfoGetter) GetDevicesInfo() (devices Devices, err error) {
+ mounts, err := t.GetMounts()
+ if err != nil {
+ return nil, err
+ }
+
+ return processMounts(mounts, false)
+}
+
+func readMountsFile(file io.Reader) (mounts Devices, err error) {
+ mounts = Devices{}
+
+ scanner := bufio.NewScanner(file)
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := strings.Fields(line)
+
+ device := &Device{
+ Name: parts[0],
+ MountPoint: unescapeString(parts[1]),
+ Fstype: parts[2],
+ }
+ mounts = append(mounts, device)
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return mounts, nil
+}
+
+func processMounts(mounts Devices, ignoreErrors bool) (devices Devices, err error) {
+ devices = Devices{}
+
+ for _, mount := range mounts {
+ if strings.Contains(mount.MountPoint, "/snap/") {
+ continue
+ }
+
+ if strings.HasPrefix(mount.Name, "/dev") ||
+ mount.Fstype == "zfs" ||
+ mount.Fstype == "nfs" ||
+ mount.Fstype == "nfs4" {
+ info := &unix.Statfs_t{}
+ err = unix.Statfs(mount.MountPoint, info)
+ if err != nil && !ignoreErrors {
+ return nil, err
+ }
+
+ mount.Size = int64(info.Bsize) * int64(info.Blocks)
+ mount.Free = int64(info.Bsize) * int64(info.Bavail)
+
+ devices = append(devices, mount)
+ }
+ }
+
+ return devices, nil
+}
+
+func unescapeString(str string) string {
+ return strings.ReplaceAll(str, "\\040", " ")
+}
--- /dev/null
+//go:build linux
+
+package device
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestGetDevicesInfo(t *testing.T) {
+ getter := LinuxDevicesInfoGetter{MountsPath: "/proc/mounts"}
+ devices, _ := getter.GetDevicesInfo()
+ assert.IsType(t, Devices{}, devices)
+}
+
+func TestGetDevicesInfoFail(t *testing.T) {
+ getter := LinuxDevicesInfoGetter{MountsPath: "/xxxyyy"}
+ _, err := getter.GetDevicesInfo()
+ assert.Equal(t, "open /xxxyyy: no such file or directory", err.Error())
+}
+
+func TestSnapMountsNotShown(t *testing.T) {
+ mounts, _ := readMountsFile(strings.NewReader(`/dev/loop4 /var/lib/snapd/snap/core18/1944 squashfs ro,nodev,relatime 0 0
+/dev/loop3 /var/lib/snapd/snap/core20/904 squashfs ro,nodev,relatime 0 0
+/dev/nvme0n1p1 /boot vfat rw,relatime,fmask=0022,dmask=0022,codepage=437,iocharset=ascii,shortname=mixed,utf8,errors=remount-ro 0 0`))
+
+ devices, err := processMounts(mounts, true)
+ assert.Len(t, devices, 1)
+ assert.Nil(t, err)
+}
+
+func TestZfsMountsShown(t *testing.T) {
+ mounts, _ := readMountsFile(strings.NewReader(`rootpool/opt /opt zfs rw,nodev,relatime,xattr,posixacl 0 0
+rootpool/usr/local /usr/local zfs rw,nodev,relatime,xattr,posixacl 0 0
+rootpool/home/root /root zfs rw,nodev,relatime,xattr,posixacl 0 0
+rootpool/usr/games /usr/games zfs rw,nodev,relatime,xattr,posixacl 0 0
+rootpool/home /home zfs rw,nodev,relatime,xattr,posixacl 0 0
+/dev/loop4 /var/lib/snapd/snap/core18/1944 squashfs ro,nodev,relatime 0 0
+/dev/loop3 /var/lib/snapd/snap/core20/904 squashfs ro,nodev,relatime 0 0
+/dev/nvme0n1p1 /boot vfat rw,relatime,fmask=0022,dmask=0022,codepage=437,iocharset=ascii,shortname=mixed,utf8,errors=remount-ro 0 0`))
+
+ devices, err := processMounts(mounts, true)
+ assert.Len(t, devices, 6)
+ assert.Nil(t, err)
+}
+
+func TestNfsMountsShown(t *testing.T) {
+ // nolint: lll // Why: Test data
+ mounts, _ := readMountsFile(strings.NewReader(`host1:/dir1/ /mnt/dir1 nfs4 rw,nosuid,nodev,noatime,nodiratime,vers=4.2,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.1,fsc,local_lock=none,addr=192.168.1.2 0 0
+host2:/dir2/ /mnt/dir2 nfs rw,relatime,vers=3,rsize=524288,wsize=524288,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.3,mountvers=3,mountport=38081,mountproto=udp,fsc,local_lock=none,addr=192.168.1.4 0 0`))
+
+ devices, err := processMounts(mounts, true)
+ assert.Len(t, devices, 2)
+ assert.Equal(t, "host1:/dir1/", devices[0].Name)
+ assert.Equal(t, "/mnt/dir1", devices[0].MountPoint)
+ assert.Nil(t, err)
+}
+
+func TestMountsWithSpaces(t *testing.T) {
+ // nolint: lll // Why: Test data
+ mounts, _ := readMountsFile(strings.NewReader(`host1:/dir1/ /mnt/dir\040with\040spaces nfs4 rw,nosuid,nodev,noatime,nodiratime,vers=4.2,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.1,fsc,local_lock=none,addr=192.168.1.2 0 0
+host2:/dir2/ /mnt/dir2 nfs rw,relatime,vers=3,rsize=524288,wsize=524288,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.3,mountvers=3,mountport=38081,mountproto=udp,fsc,local_lock=none,addr=192.168.1.4 0 0`))
+
+ devices, err := processMounts(mounts, true)
+ assert.Len(t, devices, 2)
+ assert.Equal(t, "host1:/dir1/", devices[0].Name)
+ assert.Equal(t, "/mnt/dir with spaces", devices[0].MountPoint)
+ assert.Nil(t, err)
+}
--- /dev/null
+//go:build netbsd
+
+package device
+
+import (
+ "strings"
+
+ "golang.org/x/sys/unix"
+)
+
+func processMounts(mounts Devices, ignoreErrors bool) (devices Devices, err error) {
+ for _, mount := range mounts {
+ if strings.HasPrefix(mount.Name, "/dev") || mount.Fstype == "zfs" {
+ info := &unix.Statvfs_t{}
+ err = unix.Statvfs(mount.MountPoint, info)
+ if err != nil && !ignoreErrors {
+ return nil, err
+ }
+
+ mount.Size = int64(info.Bsize) * int64(info.Blocks)
+ mount.Free = int64(info.Bsize) * int64(info.Bavail)
+
+ devices = append(devices, mount)
+ }
+ }
+
+ return devices, nil
+}
--- /dev/null
+//go:build openbsd
+
+package device
+
+import (
+ "fmt"
+ "strings"
+
+ "golang.org/x/sys/unix"
+)
+
+func processMounts(mounts Devices, ignoreErrors bool) (devices Devices, err error) {
+ for _, mount := range mounts {
+ if strings.HasPrefix(mount.Name, "/dev") || mount.Fstype == "zfs" {
+ info := &unix.Statfs_t{}
+ err = unix.Statfs(mount.MountPoint, info)
+ if err != nil && !ignoreErrors {
+ return nil, fmt.Errorf("getting stats for mount point: \"%s\", %w", mount.MountPoint, err)
+ }
+
+ mount.Size = int64(info.F_bsize) * int64(info.F_blocks)
+ mount.Free = int64(info.F_bsize) * int64(info.F_bavail)
+
+ devices = append(devices, mount)
+ }
+ }
+
+ return devices, nil
+}
--- /dev/null
+//go:build windows || plan9
+
+package device
+
+import "errors"
+
+// OtherDevicesInfoGetter returns info for other devices
+type OtherDevicesInfoGetter struct{}
+
+// Getter is current instance of DevicesInfoGetter
+var Getter DevicesInfoGetter = OtherDevicesInfoGetter{}
+
+// GetDevicesInfo returns result of GetMounts with usage info about mounted devices
+func (t OtherDevicesInfoGetter) GetDevicesInfo() (devices Devices, err error) {
+ return nil, errors.New("Only Linux platform is supported for listing devices")
+}
+
+// GetMounts returns all mounted filesystems
+func (t OtherDevicesInfoGetter) GetMounts() (devices Devices, err error) {
+ return nil, errors.New("Only Linux platform is supported for listing mount points")
+}
--- /dev/null
+package device
+
+import (
+ "sort"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNested(t *testing.T) {
+ item := &Device{
+ MountPoint: "/xxx",
+ }
+ nested := &Device{
+ MountPoint: "/xxx/yyy",
+ }
+ notNested := &Device{
+ MountPoint: "/zzz/yyy",
+ }
+
+ mounts := Devices{item, nested, notNested}
+
+ mountsNested := GetNestedMountpointsPaths("/xxx", mounts)
+
+ assert.Len(t, mountsNested, 1)
+ assert.Equal(t, "/xxx/yyy", mountsNested[0])
+}
+
+func TestSortByName(t *testing.T) {
+ item := &Device{
+ Name: "/xxx",
+ }
+ nested := &Device{
+ Name: "/xxx/yyy",
+ }
+ notNested := &Device{
+ Name: "/zzz/yyy",
+ }
+
+ devices := Devices{item, nested, notNested}
+
+ sort.Sort(sort.Reverse(ByName(devices)))
+
+ assert.Equal(t, "/zzz/yyy", devices[0].Name)
+ assert.Equal(t, "/xxx/yyy", devices[1].Name)
+ assert.Equal(t, "/xxx", devices[2].Name)
+}
+
+func TestSortByUsedSize(t *testing.T) {
+ item := &Device{
+ Name: "xxx",
+ Size: 1e12,
+ Free: 1e3,
+ }
+ nested := &Device{
+ Name: "yyy",
+ Size: 1e12,
+ Free: 1e6,
+ }
+ notNested := &Device{
+ Name: "zzz",
+ Size: 1e12,
+ Free: 1e12,
+ }
+
+ devices := Devices{item, nested, notNested}
+
+ sort.Sort(ByUsedSize(devices))
+
+ assert.Equal(t, "zzz", devices[0].Name)
+ assert.Equal(t, "yyy", devices[1].Name)
+ assert.Equal(t, "xxx", devices[2].Name)
+}
--- /dev/null
+package fs
+
+import (
+ "io"
+ "iter"
+ "time"
+
+ "github.com/maruel/natural"
+)
+
+// SortBy represents the field to sort files by
+type SortBy int
+
+const (
+ SortBySize SortBy = iota
+ SortByName
+ SortByItemCount
+ SortByMtime
+ SortByApparentSize
+)
+
+// SortOrder represents the sort direction
+type SortOrder int
+
+const (
+ SortAsc SortOrder = iota
+ SortDesc
+)
+
+// Item is a FS item (file or dir)
+type Item interface {
+ GetPath() string
+ GetName() string
+ GetFlag() rune
+ IsDir() bool
+ GetSize() int64
+ GetType() string
+ GetUsage() int64
+ GetMtime() time.Time
+ GetItemCount() int64
+ GetParent() Item
+ SetParent(Item)
+ GetMultiLinkedInode() uint64
+ EncodeJSON(writer io.Writer, topLevel bool) error
+ GetItemStats(linkedItems HardLinkedItems) (itemCount int64, size, usage int64)
+ UpdateStats(linkedItems HardLinkedItems)
+ AddFile(Item)
+ GetFiles(SortBy, SortOrder) iter.Seq[Item]
+ GetFilesLocked(SortBy, SortOrder) iter.Seq[Item]
+ RemoveFile(Item)
+ RemoveFileByName(name string)
+ RLock() func()
+}
+
+// Files - slice of pointers to File
+type Files []Item
+
+// HardLinkedItems maps inode number to array of all hard linked items
+type HardLinkedItems map[uint64]Files
+
+// IndexOf searches File in Files and returns its index
+func (f Files) IndexOf(file Item) (int, bool) {
+ for i, item := range f {
+ if item == file {
+ return i, true
+ }
+ }
+ return 0, false
+}
+
+// FindByName searches name in Files and returns its index
+func (f Files) FindByName(name string) (int, bool) {
+ for i, item := range f {
+ if item.GetName() == name {
+ return i, true
+ }
+ }
+ return 0, false
+}
+
+// Remove removes File from Files
+func (f Files) Remove(file Item) Files {
+ index, ok := f.IndexOf(file)
+ if !ok {
+ return f
+ }
+ return append(f[:index], f[index+1:]...)
+}
+
+// RemoveByName removes File from Files
+func (f Files) RemoveByName(name string) Files {
+ index, ok := f.FindByName(name)
+ if !ok {
+ return f
+ }
+ return append(f[:index], f[index+1:]...)
+}
+
+func (f Files) Len() int { return len(f) }
+func (f Files) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+func (f Files) Less(i, j int) bool {
+ if f[i].GetUsage() != f[j].GetUsage() {
+ return f[i].GetUsage() < f[j].GetUsage()
+ }
+ // if usage is the same, sort by name
+ return natural.Less(f[i].GetName(), f[j].GetName())
+}
+
+// ByApparentSize sorts files by apparent size
+type ByApparentSize Files
+
+func (f ByApparentSize) Len() int { return len(f) }
+func (f ByApparentSize) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+func (f ByApparentSize) Less(i, j int) bool {
+ if f[i].GetSize() != f[j].GetSize() {
+ return f[i].GetSize() < f[j].GetSize()
+ }
+ // if size is the same, sort by name
+ return natural.Less(f[i].GetName(), f[j].GetName())
+}
+
+// ByItemCount sorts files by item count
+type ByItemCount Files
+
+func (f ByItemCount) Len() int { return len(f) }
+func (f ByItemCount) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+func (f ByItemCount) Less(i, j int) bool {
+ if f[i].GetItemCount() != f[j].GetItemCount() {
+ return f[i].GetItemCount() < f[j].GetItemCount()
+ }
+ // if item count is the same, sort by name
+ return natural.Less(f[i].GetName(), f[j].GetName())
+}
+
+// ByName sorts files by name
+type ByName Files
+
+func (f ByName) Len() int { return len(f) }
+func (f ByName) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+func (f ByName) Less(i, j int) bool { return natural.Less(f[i].GetName(), f[j].GetName()) }
+
+// ByMtime sorts files by name
+type ByMtime Files
+
+func (f ByMtime) Len() int { return len(f) }
+func (f ByMtime) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+func (f ByMtime) Less(i, j int) bool {
+ if !f[i].GetMtime().Equal(f[j].GetMtime()) {
+ return f[i].GetMtime().Before(f[j].GetMtime())
+ }
+ // if item count is the same, sort by name
+ return natural.Less(f[i].GetName(), f[j].GetName())
+}
+
+// ParseSortBy converts a string to SortBy
+func ParseSortBy(s string) SortBy {
+ switch s {
+ case "name":
+ return SortByName
+ case "size":
+ return SortBySize
+ case "itemCount":
+ return SortByItemCount
+ case "mtime":
+ return SortByMtime
+ default:
+ return SortBySize
+ }
+}
+
+// ParseSortOrder converts a string to SortOrder
+func ParseSortOrder(s string) SortOrder {
+ if s == "asc" {
+ return SortAsc
+ }
+ return SortDesc
+}
--- /dev/null
+package path
+
+import "strings"
+
+// ShortenPath removes the last but one path components to fit into maxLen
+func ShortenPath(path string, maxLen int) string {
+ if len(path) <= maxLen {
+ return path
+ }
+
+ res := ""
+ parts := strings.SplitAfter(path, "/")
+ curLen := len(parts[len(parts)-1]) // count length of last part for start
+
+ for _, part := range parts[:len(parts)-1] {
+ curLen += len(part)
+ if curLen > maxLen {
+ res += ".../"
+ break
+ }
+ res += part
+ }
+
+ res += parts[len(parts)-1]
+ return res
+}
--- /dev/null
+package path
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestShortenPath(t *testing.T) {
+ assert.Equal(t, "/root", ShortenPath("/root", 10))
+ assert.Equal(t, "/home/.../foo", ShortenPath("/home/dundee/foo", 10))
+ assert.Equal(t, "/home/dundee/foo", ShortenPath("/home/dundee/foo", 50))
+ assert.Equal(t, "/home/dundee/.../bar.txt", ShortenPath("/home/dundee/foo/bar.txt", 20))
+ assert.Equal(t, "/home/.../bar.txt", ShortenPath("/home/dundee/foo/bar.txt", 15))
+}
--- /dev/null
+package remove
+
+import (
+ "os"
+ "runtime"
+ "sync"
+
+ "github.com/dundee/gdu/v5/pkg/fs"
+)
+
+var concurrencyLimit = make(chan struct{}, 3*runtime.GOMAXPROCS(0))
+
+// ItemFromDirParallel removes item from dir
+func ItemFromDirParallel(dir, item fs.Item) error {
+ if !item.IsDir() {
+ return ItemFromDir(dir, item)
+ }
+ errChan := make(chan error, 1) // we show only first error
+ var wait sync.WaitGroup
+
+ // remove all files in the directory in parallel
+ for file := range item.GetFilesLocked(fs.SortBySize, fs.SortDesc) {
+ if !file.IsDir() {
+ continue
+ }
+
+ wait.Add(1)
+ go func(itemPath string) {
+ concurrencyLimit <- struct{}{}
+ defer func() { <-concurrencyLimit }()
+
+ err := os.RemoveAll(itemPath)
+ if err != nil {
+ select {
+ // write error to channel if it's empty
+ case errChan <- err:
+ default:
+ }
+ }
+ wait.Done()
+ }(file.GetPath())
+ }
+
+ wait.Wait()
+
+ // check if there was an error
+ select {
+ case err := <-errChan:
+ return err
+ default:
+ }
+
+ // remove the directory itself
+ err := os.RemoveAll(item.GetPath())
+ if err != nil {
+ return err
+ }
+
+ // update parent directory
+ dir.RemoveFile(item)
+ return nil
+}
--- /dev/null
+//go:build linux
+
+package remove
+
+import (
+ "os"
+ "testing"
+
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestItemFromDirParallelWithErr(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ err := os.Chmod("test_dir/nested", 0)
+ assert.Nil(t, err)
+ defer func() {
+ err = os.Chmod("test_dir/nested", 0o755)
+ assert.Nil(t, err)
+ }()
+
+ dir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "test_dir",
+ },
+ BasePath: ".",
+ }
+
+ subdir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "nested",
+ Parent: dir,
+ },
+ }
+
+ err = ItemFromDirParallel(dir, subdir)
+ assert.Contains(t, err.Error(), "permission denied")
+}
+
+func TestItemFromDirParallelWithErr2(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ err := os.Chmod("test_dir/nested/subnested", 0)
+ assert.Nil(t, err)
+ defer func() {
+ err = os.Chmod("test_dir/nested/subnested", 0o755)
+ assert.Nil(t, err)
+ }()
+
+ analyzer := analyze.CreateAnalyzer()
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*analyze.Dir)
+ analyzer.GetDone().Wait()
+ dir.UpdateStats(make(fs.HardLinkedItems))
+
+ subdir := dir.Files[0].(*analyze.Dir)
+
+ err = ItemFromDirParallel(dir, subdir)
+ assert.Contains(t, err.Error(), "permission denied")
+}
--- /dev/null
+package remove
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/dundee/gdu/v5/pkg/fs"
+)
+
+func TestRemoveFileParallel(t *testing.T) {
+ dir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "xxx",
+ Size: 5,
+ Usage: 12,
+ },
+ ItemCount: 3,
+ BasePath: ".",
+ }
+
+ subdir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "yyy",
+ Size: 4,
+ Usage: 8,
+ Parent: dir,
+ },
+ ItemCount: 2,
+ }
+ file := &analyze.File{
+ Name: "zzz",
+ Size: 3,
+ Usage: 4,
+ Parent: subdir,
+ }
+ dir.Files = fs.Files{subdir}
+ subdir.Files = fs.Files{file}
+
+ err := ItemFromDirParallel(subdir, file)
+ assert.Nil(t, err)
+
+ assert.Equal(t, 0, len(subdir.Files))
+ assert.Equal(t, int64(1), subdir.ItemCount)
+ assert.Equal(t, int64(1), subdir.Size)
+ assert.Equal(t, int64(4), subdir.Usage)
+ assert.Equal(t, 1, len(dir.Files))
+ assert.Equal(t, int64(2), dir.ItemCount)
+ assert.Equal(t, int64(2), dir.Size)
+}
+
+func TestRemoveDirParallel(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ analyzer := analyze.CreateAnalyzer()
+ dir := analyzer.AnalyzeDir(
+ "test_dir", func(_, _ string) bool { return false }, func(_ string) bool { return false },
+ ).(*analyze.Dir)
+ analyzer.GetDone().Wait()
+ dir.UpdateStats(make(fs.HardLinkedItems))
+
+ subdir := dir.Files[0].(*analyze.Dir)
+
+ err := ItemFromDirParallel(dir, subdir)
+ assert.Nil(t, err)
+}
--- /dev/null
+package remove
+
+import (
+ "os"
+
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/dundee/gdu/v5/pkg/fs"
+)
+
+// ItemFromDir removes item from dir
+func ItemFromDir(dir, item fs.Item) error {
+ err := os.RemoveAll(item.GetPath())
+ if err != nil {
+ return err
+ }
+
+ dir.RemoveFile(item)
+ return nil
+}
+
+// EmptyFileFromDir empties file from dir (truncates to 0 bytes)
+func EmptyFileFromDir(dir, file fs.Item) error {
+ err := os.Truncate(file.GetPath(), 0)
+ if err != nil {
+ return err
+ }
+
+ // Remove old file and add zero-sized one
+ dir.RemoveFile(file)
+ newFile := &analyze.File{
+ Name: file.GetName(),
+ Flag: file.GetFlag(),
+ Size: 0,
+ Usage: 0,
+ Parent: dir,
+ }
+ dir.AddFile(newFile)
+ return nil
+}
--- /dev/null
+//go:build linux
+
+package remove
+
+import (
+ "os"
+ "testing"
+
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestRemoveFileWithErr(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ err := os.Chmod("test_dir/nested", 0)
+ assert.Nil(t, err)
+ defer func() {
+ err = os.Chmod("test_dir/nested", 0o755)
+ assert.Nil(t, err)
+ }()
+
+ dir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "test_dir",
+ },
+ BasePath: ".",
+ }
+
+ subdir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "nested",
+ Parent: dir,
+ },
+ }
+
+ err = ItemFromDir(dir, subdir)
+ assert.Contains(t, err.Error(), "permission denied")
+}
--- /dev/null
+package remove
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/dundee/gdu/v5/pkg/fs"
+)
+
+func TestTruncateFile(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ dir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "test_dir",
+ Size: 5,
+ Usage: 12,
+ },
+ ItemCount: 3,
+ BasePath: ".",
+ }
+
+ subdir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "nested",
+ Size: 4,
+ Usage: 8,
+ Parent: dir,
+ },
+ ItemCount: 2,
+ }
+ file := &analyze.File{
+ Name: "file2",
+ Size: 3,
+ Usage: 4,
+ Parent: subdir,
+ }
+ dir.Files = fs.Files{subdir}
+ subdir.Files = fs.Files{file}
+
+ err := EmptyFileFromDir(subdir, file)
+
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(subdir.Files))
+ assert.Equal(t, int64(1), subdir.ItemCount) // RemoveFile decrements, AddFile doesn't increment
+ assert.Equal(t, int64(1), subdir.Size)
+ assert.Equal(t, int64(4), subdir.Usage)
+ assert.Equal(t, 1, len(dir.Files))
+ assert.Equal(t, int64(2), dir.ItemCount) // RemoveFile decrements, AddFile doesn't increment
+ assert.Equal(t, int64(2), dir.Size)
+}
+
+func TestRemoveFile(t *testing.T) {
+ dir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "xxx",
+ Size: 5,
+ Usage: 12,
+ },
+ ItemCount: 3,
+ BasePath: ".",
+ }
+
+ subdir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "yyy",
+ Size: 4,
+ Usage: 8,
+ Parent: dir,
+ },
+ ItemCount: 2,
+ }
+ file := &analyze.File{
+ Name: "zzz",
+ Size: 3,
+ Usage: 4,
+ Parent: subdir,
+ }
+ dir.Files = fs.Files{subdir}
+ subdir.Files = fs.Files{file}
+
+ err := ItemFromDir(subdir, file)
+ assert.Nil(t, err)
+
+ assert.Equal(t, 0, len(subdir.Files))
+ assert.Equal(t, int64(1), subdir.ItemCount)
+ assert.Equal(t, int64(1), subdir.Size)
+ assert.Equal(t, int64(4), subdir.Usage)
+ assert.Equal(t, 1, len(dir.Files))
+ assert.Equal(t, int64(2), dir.ItemCount)
+ assert.Equal(t, int64(2), dir.Size)
+}
+
+func TestTruncateFileWithErr(t *testing.T) {
+ dir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "xxx",
+ Size: 5,
+ Usage: 12,
+ },
+ ItemCount: 3,
+ BasePath: ".",
+ }
+
+ subdir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "yyy",
+ Size: 4,
+ Usage: 8,
+ Parent: dir,
+ },
+ ItemCount: 2,
+ }
+ file := &analyze.File{
+ Name: "zzz",
+ Size: 3,
+ Usage: 4,
+ Parent: subdir,
+ }
+ dir.Files = fs.Files{subdir}
+ subdir.Files = fs.Files{file}
+
+ err := EmptyFileFromDir(subdir, file)
+
+ assert.Contains(t, err.Error(), "no such file or directory")
+}
--- /dev/null
+package timefilter
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// TimeBound represents a parsed time filter value that can be either an instant or a date-only value
+type TimeBound struct {
+ instant *time.Time // absolute instant (UTC)
+ dateOnly *time.Time // at local midnight; only YYYY-MM-DD will set this
+}
+
+// IsEmpty returns true if the TimeBound has no filter criteria
+func (tb TimeBound) IsEmpty() bool {
+ return tb.instant == nil && tb.dateOnly == nil
+}
+
+// TimeFilter represents multiple time filtering criteria
+type TimeFilter struct {
+ since []*TimeBound
+ until []*TimeBound
+}
+
+// NewTimeFilter creates a new TimeFilter with the given parameters
+func NewTimeFilter(since, until, maxAge, minAge string, now time.Time, loc *time.Location) (*TimeFilter, error) {
+ tf := &TimeFilter{}
+
+ // Parse since
+ if since != "" {
+ sinceBound, err := parseTimeValue(since, loc)
+ if err != nil {
+ return nil, fmt.Errorf("invalid --since value: %w", err)
+ }
+ if !sinceBound.IsEmpty() {
+ tf.since = append(tf.since, &sinceBound)
+ }
+ }
+
+ // Parse until
+ if until != "" {
+ untilBound, err := parseTimeValue(until, loc)
+ if err != nil {
+ return nil, fmt.Errorf("invalid --until value: %w", err)
+ }
+ if !untilBound.IsEmpty() {
+ tf.until = append(tf.until, &untilBound)
+ }
+ }
+
+ // Parse max-age (convert to since)
+ if maxAge != "" {
+ duration, err := parseDuration(maxAge)
+ if err != nil {
+ return nil, fmt.Errorf("invalid --max-age value: %w", err)
+ }
+ sinceTime := now.Add(-duration).UTC()
+ tf.since = append(tf.since, &TimeBound{instant: &sinceTime})
+ }
+
+ // Parse min-age (convert to until)
+ if minAge != "" {
+ duration, err := parseDuration(minAge)
+ if err != nil {
+ return nil, fmt.Errorf("invalid --min-age value: %w", err)
+ }
+ untilTime := now.Add(-duration).UTC()
+ tf.until = append(tf.until, &TimeBound{instant: &untilTime})
+ }
+
+ return tf, nil
+}
+
+// IncludeByTimeFilter determines if a file should be included based on the complete time filter
+func (tf *TimeFilter) IncludeByTimeFilter(mtime time.Time, loc *time.Location) bool {
+ // Check since bound
+ for _, since := range tf.since {
+ if !includeByTimeBound(mtime, *since, loc, false) {
+ return false
+ }
+ }
+
+ // Check until bound
+ for _, until := range tf.until {
+ if !includeByTimeBound(mtime, *until, loc, true) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// IsEmpty returns true if the TimeFilter has no filter criteria
+func (tf *TimeFilter) IsEmpty() bool {
+ return tf.since == nil && tf.until == nil
+}
+
+// FormatForDisplay returns a formatted string showing the active time filters
+// This shows what the program actually parsed and is acting on
+func (tf *TimeFilter) FormatForDisplay(loc *time.Location) string {
+ if tf.IsEmpty() {
+ return ""
+ }
+
+ var parts []string
+
+ for _, since := range tf.since {
+ if since.instant != nil {
+ parts = append(parts, "since="+since.instant.In(loc).Format(time.RFC3339))
+ } else if since.dateOnly != nil {
+ parts = append(parts, "since="+since.dateOnly.Format("2006-01-02")+" (date-only)")
+ }
+ }
+
+ for _, until := range tf.until {
+ if until.instant != nil {
+ parts = append(parts, "until=", until.instant.In(loc).Format(time.RFC3339))
+ } else if until.dateOnly != nil {
+ parts = append(parts, "until=", until.dateOnly.Format("2006-01-02")+" (date-only)")
+ }
+ }
+
+ if len(parts) == 0 {
+ return ""
+ }
+
+ return " Filtered by: time=mtime; " + strings.Join(parts, "; ")
+}
+
+// includeByTimeBound determines if a file should be included based on its mtime and the time bound
+func includeByTimeBound(mtime time.Time, tb TimeBound, loc *time.Location, isUntil bool) bool {
+ if tb.instant == nil && tb.dateOnly == nil {
+ return true // no filter applied
+ }
+
+ if tb.instant != nil {
+ if isUntil {
+ return !mtime.After(*tb.instant) // inclusive (<=)
+ }
+ return !mtime.Before(*tb.instant) // inclusive (>=)
+ }
+
+ if tb.dateOnly != nil {
+ // For date-only comparisons, adjust the bound to cover the whole day.
+ boundDate := tb.dateOnly.In(loc)
+
+ if isUntil {
+ // For `until`, we want to include the entire day.
+ // So the upper bound is the beginning of the *next* day.
+ upperBound := time.Date(boundDate.Year(), boundDate.Month(), boundDate.Day(), 0, 0, 0, 0, loc).AddDate(0, 0, 1)
+ return mtime.Before(upperBound)
+ }
+
+ // For `since`, we want to include the entire day.
+ // So the lower bound is the beginning of that day.
+ lowerBound := time.Date(boundDate.Year(), boundDate.Month(), boundDate.Day(), 0, 0, 0, 0, loc)
+ return !mtime.Before(lowerBound) // inclusive (>=)
+ }
+
+ return true
+}
+
+// parseDuration parses a duration string with support for extended units
+// Supports: s, m, h, d (=24h), w (=7d), mo (=30d), y (=365d)
+// Examples: "90m", "2h30m", "7d", "6w", "1y2mo"
+func parseDuration(input string) (time.Duration, error) {
+ if input == "" {
+ return 0, fmt.Errorf("empty duration")
+ }
+
+ // Remove whitespace and convert to lowercase
+ input = strings.ToLower(strings.ReplaceAll(input, " ", ""))
+
+ // Regex to match number+unit pairs (mo must come before m to avoid greedy matching)
+ re := regexp.MustCompile(`(\d+)(mo|s|m|h|d|w|y)`)
+ matches := re.FindAllStringSubmatch(input, -1)
+
+ if len(matches) == 0 {
+ return 0, fmt.Errorf("invalid duration format %q. Use combinations like 7d, 2h30m, 1y2mo", input)
+ }
+
+ // Check if the entire input was consumed by matches
+ consumed := ""
+ for _, match := range matches {
+ consumed += match[0]
+ }
+ if consumed != input {
+ return 0, fmt.Errorf("invalid duration format %q. Use combinations like 7d, 2h30m, 1y2mo", input)
+ }
+
+ var total time.Duration
+ for _, match := range matches {
+ value, err := strconv.Atoi(match[1])
+ if err != nil {
+ return 0, fmt.Errorf("invalid number in duration: %s", match[1])
+ }
+
+ unit := match[2]
+ var duration time.Duration
+
+ switch unit {
+ case "s":
+ duration = time.Duration(value) * time.Second
+ case "m":
+ duration = time.Duration(value) * time.Minute
+ case "h":
+ duration = time.Duration(value) * time.Hour
+ case "d":
+ duration = time.Duration(value) * 24 * time.Hour
+ case "w":
+ duration = time.Duration(value) * 7 * 24 * time.Hour
+ case "mo":
+ duration = time.Duration(value) * 30 * 24 * time.Hour
+ case "y":
+ duration = time.Duration(value) * 365 * 24 * time.Hour
+ default:
+ return 0, fmt.Errorf("unsupported duration unit: %s", unit)
+ }
+
+ total += duration
+ }
+
+ return total, nil
+}
+
+// parseTimeValue parses a time value into either a timestamp instant or a date-only value
+func parseTimeValue(arg string, loc *time.Location) (TimeBound, error) {
+ if arg == "" {
+ return TimeBound{}, nil
+ }
+
+ // 1) Try RFC3339 instant
+ if t, err := time.Parse(time.RFC3339, arg); err == nil {
+ u := t.UTC()
+ return TimeBound{instant: &u}, nil
+ }
+
+ // 2) Try strict YYYY-MM-DD
+ if len(arg) == 10 {
+ if d, err := time.ParseInLocation("2006-01-02", arg, loc); err == nil {
+ // dateOnly uses local date; we will compare date parts only
+ return TimeBound{dateOnly: &d}, nil
+ }
+ }
+
+ return TimeBound{}, fmt.Errorf("invalid time value %q. Use RFC3339 timestamp or YYYY-MM-DD", arg)
+}
--- /dev/null
+package timefilter
+
+import (
+ "testing"
+ "time"
+)
+
+func TestParseSince(t *testing.T) {
+ // Use America/Vancouver timezone for testing (UTC-7 or UTC-8 depending on DST)
+ loc, err := time.LoadLocation("America/Vancouver")
+ if err != nil {
+ t.Fatalf("Failed to load timezone: %v", err)
+ }
+
+ tests := []struct {
+ name string
+ input string
+ expectError bool
+ expectType string // "instant", "dateOnly", or "empty"
+ }{
+ {
+ name: "empty string",
+ input: "",
+ expectError: false,
+ expectType: "empty",
+ },
+ {
+ name: "RFC3339 with timezone",
+ input: "2025-08-11T01:00:00-07:00",
+ expectError: false,
+ expectType: "instant",
+ },
+ {
+ name: "RFC3339 UTC",
+ input: "2025-08-11T08:00:00Z",
+ expectError: false,
+ expectType: "instant",
+ },
+ {
+ name: "RFC3339 with nanoseconds",
+ input: "2025-08-11T01:00:00.123456789-07:00",
+ expectError: false,
+ expectType: "instant",
+ },
+ {
+ name: "date only YYYY-MM-DD",
+ input: "2025-08-11",
+ expectError: false,
+ expectType: "dateOnly",
+ },
+ {
+ name: "invalid format",
+ input: "2025/08/11",
+ expectError: true,
+ expectType: "",
+ },
+ {
+ name: "invalid date",
+ input: "2025-13-01",
+ expectError: true,
+ expectType: "",
+ },
+ {
+ name: "too short date",
+ input: "2025-8-1",
+ expectError: true,
+ expectType: "",
+ },
+ {
+ name: "too long date",
+ input: "2025-08-011",
+ expectError: true,
+ expectType: "",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result, err := parseTimeValue(tt.input, loc)
+
+ if tt.expectError {
+ if err == nil {
+ t.Errorf("Expected error but got none")
+ }
+ return
+ }
+
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ return
+ }
+
+ switch tt.expectType {
+ case "empty":
+ if !result.IsEmpty() {
+ t.Errorf("Expected empty result")
+ }
+ case "instant":
+ if result.instant == nil {
+ t.Errorf("Expected instant to be set")
+ }
+ if result.dateOnly != nil {
+ t.Errorf("Expected dateOnly to be nil")
+ }
+ case "dateOnly":
+ if result.dateOnly == nil {
+ t.Errorf("Expected dateOnly to be set")
+ }
+ if result.instant != nil {
+ t.Errorf("Expected instant to be nil")
+ }
+ }
+ })
+ }
+}
+
+func TestIncludeBySince(t *testing.T) {
+ // Use America/Vancouver timezone for testing (UTC-7 or UTC-8 depending on DST)
+ loc, err := time.LoadLocation("America/Vancouver")
+ if err != nil {
+ t.Fatalf("Failed to load timezone: %v", err)
+ }
+
+ // Test cases from the MVP document
+ tests := []struct {
+ name string
+ fileMtime string // local time
+ sinceArg string
+ expectInclude bool
+ }{
+ {
+ name: "file before date boundary",
+ fileMtime: "2025-08-10T23:59:00-07:00",
+ sinceArg: "2025-08-11",
+ expectInclude: false,
+ },
+ {
+ name: "file at start of date",
+ fileMtime: "2025-08-11T00:00:00-07:00",
+ sinceArg: "2025-08-11",
+ expectInclude: true,
+ },
+ {
+ name: "file during date",
+ fileMtime: "2025-08-11T01:00:00-07:00",
+ sinceArg: "2025-08-11",
+ expectInclude: true,
+ },
+ {
+ name: "file at end of date",
+ fileMtime: "2025-08-11T23:59:00-07:00",
+ sinceArg: "2025-08-11",
+ expectInclude: true,
+ },
+ {
+ name: "file after date",
+ fileMtime: "2025-08-12T00:00:00-07:00",
+ sinceArg: "2025-08-11",
+ expectInclude: true,
+ },
+ {
+ name: "instant mode - file before",
+ fileMtime: "2025-08-11T01:00:00-07:00",
+ sinceArg: "2025-08-11T02:00:00-07:00",
+ expectInclude: false,
+ },
+ {
+ name: "instant mode - file after",
+ fileMtime: "2025-08-11T03:00:00-07:00",
+ sinceArg: "2025-08-11T02:00:00-07:00",
+ expectInclude: true,
+ },
+ {
+ name: "instant mode - file exactly at boundary",
+ fileMtime: "2025-08-11T02:00:00-07:00",
+ sinceArg: "2025-08-11T02:00:00-07:00",
+ expectInclude: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Parse file mtime
+ fileMtime, err := time.Parse(time.RFC3339, tt.fileMtime)
+ if err != nil {
+ t.Fatalf("Failed to parse file mtime: %v", err)
+ }
+
+ // Parse since bound
+ sinceBound, err := parseTimeValue(tt.sinceArg, loc)
+ if err != nil {
+ t.Fatalf("Failed to parse since arg: %v", err)
+ }
+
+ // Test inclusion
+ result := includeByTimeBound(fileMtime, sinceBound, loc, false)
+ if result != tt.expectInclude {
+ t.Errorf("Expected include=%v, got include=%v", tt.expectInclude, result)
+ }
+ })
+ }
+}
+
+func TestIncludeBySinceEmpty(t *testing.T) {
+ loc, err := time.LoadLocation("America/Vancouver")
+ if err != nil {
+ t.Fatalf("Failed to load timezone: %v", err)
+ }
+
+ // Test with empty since bound (no filter)
+ emptySince := TimeBound{}
+ testTime := time.Now()
+
+ result := includeByTimeBound(testTime, emptySince, loc, false)
+ if !result {
+ t.Errorf("Expected true for empty since bound, got false")
+ }
+}
+
+func TestTimeBoundIsEmpty(t *testing.T) {
+ tests := []struct {
+ name string
+ bound TimeBound
+ expected bool
+ }{
+ {
+ name: "empty bound",
+ bound: TimeBound{},
+ expected: true,
+ },
+ {
+ name: "instant bound",
+ bound: TimeBound{instant: &time.Time{}},
+ expected: false,
+ },
+ {
+ name: "dateOnly bound",
+ bound: TimeBound{dateOnly: &time.Time{}},
+ expected: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := tt.bound.IsEmpty()
+ if result != tt.expected {
+ t.Errorf("Expected %v, got %v", tt.expected, result)
+ }
+ })
+ }
+}
+
+func TestParseDuration(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ expected time.Duration
+ expectError bool
+ }{
+ {
+ name: "empty string",
+ input: "",
+ expectError: true,
+ },
+ {
+ name: "seconds",
+ input: "30s",
+ expected: 30 * time.Second,
+ },
+ {
+ name: "minutes",
+ input: "45m",
+ expected: 45 * time.Minute,
+ },
+ {
+ name: "hours",
+ input: "2h",
+ expected: 2 * time.Hour,
+ },
+ {
+ name: "days",
+ input: "7d",
+ expected: 7 * 24 * time.Hour,
+ },
+ {
+ name: "weeks",
+ input: "2w",
+ expected: 2 * 7 * 24 * time.Hour,
+ },
+ {
+ name: "months",
+ input: "3mo",
+ expected: 3 * 30 * 24 * time.Hour,
+ },
+ {
+ name: "years",
+ input: "1y",
+ expected: 365 * 24 * time.Hour,
+ },
+ {
+ name: "combined hours and minutes",
+ input: "2h30m",
+ expected: 2*time.Hour + 30*time.Minute,
+ },
+ {
+ name: "combined with spaces",
+ input: "2 h 30 m",
+ expected: 2*time.Hour + 30*time.Minute,
+ },
+ {
+ name: "complex combination",
+ input: "1y2mo3w4d5h6m7s",
+ expected: 365*24*time.Hour + 2*30*24*time.Hour + 3*7*24*time.Hour + 4*24*time.Hour + 5*time.Hour + 6*time.Minute + 7*time.Second,
+ },
+ {
+ name: "uppercase",
+ input: "2H30M",
+ expected: 2*time.Hour + 30*time.Minute,
+ },
+ {
+ name: "invalid format",
+ input: "2x",
+ expectError: true,
+ },
+ {
+ name: "no number",
+ input: "h",
+ expectError: true,
+ },
+ {
+ name: "partial match",
+ input: "2h30",
+ expectError: true,
+ },
+ {
+ name: "invalid number",
+ input: "abch",
+ expectError: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result, err := parseDuration(tt.input)
+
+ if tt.expectError {
+ if err == nil {
+ t.Errorf("Expected error but got none")
+ }
+ return
+ }
+
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ return
+ }
+
+ if result != tt.expected {
+ t.Errorf("Expected %v, got %v", tt.expected, result)
+ }
+ })
+ }
+}
+
+func TestNewTimeFilter(t *testing.T) {
+ loc, err := time.LoadLocation("America/Vancouver")
+ if err != nil {
+ t.Fatalf("Failed to load timezone: %v", err)
+ }
+
+ now := time.Date(2025, 8, 11, 12, 0, 0, 0, loc)
+
+ tests := []struct {
+ name string
+ since string
+ until string
+ maxAge string
+ minAge string
+ expectError bool
+ expectEmpty bool
+ }{
+ {
+ name: "empty filter",
+ expectEmpty: true,
+ },
+ {
+ name: "since only",
+ since: "2025-08-10",
+ },
+ {
+ name: "until only",
+ until: "2025-08-12",
+ },
+ {
+ name: "max-age only",
+ maxAge: "7d",
+ },
+ {
+ name: "min-age only",
+ minAge: "30d",
+ },
+ {
+ name: "since and until",
+ since: "2025-08-01",
+ until: "2025-08-15",
+ },
+ {
+ name: "max-age and min-age",
+ maxAge: "7d",
+ minAge: "1d",
+ },
+ {
+ name: "all filters",
+ since: "2025-08-01",
+ until: "2025-08-15",
+ maxAge: "30d",
+ minAge: "1d",
+ },
+ {
+ name: "invalid since",
+ since: "invalid",
+ expectError: true,
+ },
+ {
+ name: "invalid until",
+ until: "invalid",
+ expectError: true,
+ },
+ {
+ name: "invalid max-age",
+ maxAge: "invalid",
+ expectError: true,
+ },
+ {
+ name: "invalid min-age",
+ minAge: "invalid",
+ expectError: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ filter, err := NewTimeFilter(tt.since, tt.until, tt.maxAge, tt.minAge, now, loc)
+
+ if tt.expectError {
+ if err == nil {
+ t.Errorf("Expected error but got none")
+ }
+ return
+ }
+
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ return
+ }
+
+ if tt.expectEmpty {
+ if !filter.IsEmpty() {
+ t.Errorf("Expected empty filter")
+ }
+ } else {
+ if filter.IsEmpty() {
+ t.Errorf("Expected non-empty filter")
+ }
+ }
+ })
+ }
+}
+
+func TestTimeFilterIncludeByTimeFilter(t *testing.T) {
+ loc, err := time.LoadLocation("America/Vancouver")
+ if err != nil {
+ t.Fatalf("Failed to load timezone: %v", err)
+ }
+
+ now := time.Date(2025, 8, 11, 12, 0, 0, 0, loc)
+
+ tests := []struct {
+ name string
+ since string
+ until string
+ maxAge string
+ minAge string
+ fileMtime string
+ expectInclude bool
+ }{
+ {
+ name: "since filter - file after",
+ since: "2025-08-10",
+ fileMtime: "2025-08-11T10:00:00-07:00",
+ expectInclude: true,
+ },
+ {
+ name: "since filter - file before",
+ since: "2025-08-10",
+ fileMtime: "2025-08-09T10:00:00-07:00",
+ expectInclude: false,
+ },
+ {
+ name: "until filter - file before",
+ until: "2025-08-12",
+ fileMtime: "2025-08-11T10:00:00-07:00",
+ expectInclude: true,
+ },
+ {
+ name: "until filter - file after",
+ until: "2025-08-12",
+ fileMtime: "2025-08-13T10:00:00-07:00",
+ expectInclude: false,
+ },
+ {
+ name: "max-age filter - file recent",
+ maxAge: "7d",
+ fileMtime: "2025-08-10T12:00:00-07:00", // 1 day ago
+ expectInclude: true,
+ },
+ {
+ name: "max-age filter - file old",
+ maxAge: "7d",
+ fileMtime: "2025-08-01T12:00:00-07:00", // 10 days ago
+ expectInclude: false,
+ },
+ {
+ name: "min-age filter - file old",
+ minAge: "7d",
+ fileMtime: "2025-08-01T12:00:00-07:00", // 10 days ago
+ expectInclude: true,
+ },
+ {
+ name: "min-age filter - file recent",
+ minAge: "7d",
+ fileMtime: "2025-08-10T12:00:00-07:00", // 1 day ago
+ expectInclude: false,
+ },
+ {
+ name: "combined filters - all pass",
+ since: "2025-08-01",
+ until: "2025-08-15",
+ maxAge: "30d",
+ minAge: "1d",
+ fileMtime: "2025-08-05T12:00:00-07:00", // 6 days ago
+ expectInclude: true,
+ },
+ {
+ name: "combined filters - since fails",
+ since: "2025-08-10",
+ until: "2025-08-15",
+ maxAge: "30d",
+ minAge: "1d",
+ fileMtime: "2025-08-05T12:00:00-07:00", // 6 days ago
+ expectInclude: false,
+ },
+ {
+ name: "combined filters - until fails",
+ since: "2025-08-01",
+ until: "2025-08-10",
+ maxAge: "30d",
+ minAge: "1d",
+ fileMtime: "2025-08-12T12:00:00-07:00", // future
+ expectInclude: false,
+ },
+ {
+ name: "combined filters - max-age fails",
+ since: "2025-08-01",
+ until: "2025-08-15",
+ maxAge: "5d",
+ minAge: "1d",
+ fileMtime: "2025-08-01T12:00:00-07:00", // 10 days ago
+ expectInclude: false,
+ },
+ {
+ name: "combined filters - min-age fails",
+ since: "2025-08-01",
+ until: "2025-08-15",
+ maxAge: "30d",
+ minAge: "5d",
+ fileMtime: "2025-08-10T12:00:00-07:00", // 1 day ago
+ expectInclude: false,
+ },
+ {
+ name: "date-only since and max-age - fail",
+ since: "2025-08-10",
+ maxAge: "3d",
+ fileMtime: "2025-08-09T12:00:00-07:00", // 2 days old, but before since date
+ expectInclude: false,
+ },
+ {
+ name: "date-only since and max-age - pass",
+ since: "2025-08-10",
+ maxAge: "3d",
+ fileMtime: "2025-08-10T12:00:00-07:00", // 1 day old, and on since date
+ expectInclude: true,
+ },
+ {
+ name: "date-only until and min-age - fail",
+ until: "2025-08-10",
+ minAge: "1d",
+ fileMtime: "2025-08-10T12:00:00-07:00", // 1 day old, but not old enough to be excluded by until
+ expectInclude: true,
+ },
+ {
+ name: "date-only until and min-age - pass",
+ until: "2025-08-10",
+ minAge: "2d",
+ fileMtime: "2025-08-08T12:00:00-07:00", // 3 days old, and before until date
+ expectInclude: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Parse file mtime
+ fileMtime, err := time.Parse(time.RFC3339, tt.fileMtime)
+ if err != nil {
+ t.Fatalf("Failed to parse file mtime: %v", err)
+ }
+
+ // Create time filter
+ filter, err := NewTimeFilter(tt.since, tt.until, tt.maxAge, tt.minAge, now, loc)
+ if err != nil {
+ t.Fatalf("Failed to create time filter: %v", err)
+ }
+
+ // Test inclusion
+ result := filter.IncludeByTimeFilter(fileMtime, loc)
+ if result != tt.expectInclude {
+ t.Errorf("Expected include=%v, got include=%v", tt.expectInclude, result)
+ }
+ })
+ }
+}
+
+func TestIncludeByTimeBound(t *testing.T) {
+ loc, err := time.LoadLocation("America/Vancouver")
+ if err != nil {
+ t.Fatalf("Failed to load timezone: %v", err)
+ }
+
+ tests := []struct {
+ name string
+ boundArg string
+ fileMtime string
+ isUntil bool
+ expectInclude bool
+ }{
+ {
+ name: "since instant - file after",
+ boundArg: "2025-08-11T10:00:00-07:00",
+ fileMtime: "2025-08-11T11:00:00-07:00",
+ isUntil: false,
+ expectInclude: true,
+ },
+ {
+ name: "since instant - file before",
+ boundArg: "2025-08-11T10:00:00-07:00",
+ fileMtime: "2025-08-11T09:00:00-07:00",
+ isUntil: false,
+ expectInclude: false,
+ },
+ {
+ name: "since instant - file exactly at boundary",
+ boundArg: "2025-08-11T10:00:00-07:00",
+ fileMtime: "2025-08-11T10:00:00-07:00",
+ isUntil: false,
+ expectInclude: true,
+ },
+ {
+ name: "until instant - file before",
+ boundArg: "2025-08-11T10:00:00-07:00",
+ fileMtime: "2025-08-11T09:00:00-07:00",
+ isUntil: true,
+ expectInclude: true,
+ },
+ {
+ name: "until instant - file after",
+ boundArg: "2025-08-11T10:00:00-07:00",
+ fileMtime: "2025-08-11T11:00:00-07:00",
+ isUntil: true,
+ expectInclude: false,
+ },
+ {
+ name: "until instant - file exactly at boundary",
+ boundArg: "2025-08-11T10:00:00-07:00",
+ fileMtime: "2025-08-11T10:00:00-07:00",
+ isUntil: true,
+ expectInclude: true,
+ },
+ {
+ name: "since date - file just before day",
+ boundArg: "2025-08-11",
+ fileMtime: "2025-08-10T23:59:59-07:00",
+ isUntil: false,
+ expectInclude: false,
+ },
+ {
+ name: "since date - file at start of day",
+ boundArg: "2025-08-11",
+ fileMtime: "2025-08-11T00:00:00-07:00",
+ isUntil: false,
+ expectInclude: true,
+ },
+ {
+ name: "since date - file at end of day",
+ boundArg: "2025-08-11",
+ fileMtime: "2025-08-11T23:59:59-07:00",
+ isUntil: false,
+ expectInclude: true,
+ },
+ {
+ name: "since date - file on next day",
+ boundArg: "2025-08-11",
+ fileMtime: "2025-08-12T00:00:00-07:00",
+ isUntil: false,
+ expectInclude: true,
+ },
+ {
+ name: "until date - file on previous day",
+ boundArg: "2025-08-11",
+ fileMtime: "2025-08-10T23:59:59-07:00",
+ isUntil: true,
+ expectInclude: true,
+ },
+ {
+ name: "until date - file at start of day",
+ boundArg: "2025-08-11",
+ fileMtime: "2025-08-11T00:00:00-07:00",
+ isUntil: true,
+ expectInclude: true,
+ },
+ {
+ name: "until date - file at end of day",
+ boundArg: "2025-08-11",
+ fileMtime: "2025-08-11T23:59:59-07:00",
+ isUntil: true,
+ expectInclude: true,
+ },
+ {
+ name: "until date - file just after day",
+ boundArg: "2025-08-11",
+ fileMtime: "2025-08-12T00:00:00-07:00",
+ isUntil: true,
+ expectInclude: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Parse time bound
+ bound, err := parseTimeValue(tt.boundArg, loc)
+ if err != nil {
+ t.Fatalf("Failed to parse time bound: %v", err)
+ }
+
+ // Parse file mtime
+ fileMtime, err := time.Parse(time.RFC3339, tt.fileMtime)
+ if err != nil {
+ t.Fatalf("Failed to parse file mtime: %v", err)
+ }
+
+ // Test inclusion
+ result := includeByTimeBound(fileMtime, bound, loc, tt.isUntil)
+ if result != tt.expectInclude {
+ t.Errorf("Expected include=%v, got include=%v", tt.expectInclude, result)
+ }
+ })
+ }
+}
--- /dev/null
+package report
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/dundee/gdu/v5/build"
+ "github.com/dundee/gdu/v5/internal/common"
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/dundee/gdu/v5/pkg/device"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/fatih/color"
+)
+
+// UI struct
+type UI struct {
+ *common.UI
+ output io.Writer
+ exportOutput io.Writer
+ red *color.Color
+ orange *color.Color
+ writtenChan chan struct{}
+}
+
+// CreateExportUI creates UI for stdout
+func CreateExportUI(
+ output io.Writer,
+ exportOutput io.Writer,
+ useColors bool,
+ showProgress bool,
+ useSIPrefix bool,
+) *UI {
+ ui := &UI{
+ UI: &common.UI{
+ ShowProgress: showProgress,
+ Analyzer: analyze.CreateAnalyzer(),
+ UseSIPrefix: useSIPrefix,
+ },
+ output: output,
+ exportOutput: exportOutput,
+ writtenChan: make(chan struct{}),
+ }
+ ui.red = color.New(color.FgRed).Add(color.Bold)
+ ui.orange = color.New(color.FgYellow).Add(color.Bold)
+
+ if !useColors {
+ color.NoColor = true
+ }
+
+ return ui
+}
+
+// StartUILoop stub
+func (ui *UI) StartUILoop() error {
+ return nil
+}
+
+// SetCollapsePath sets the flag to collapse paths
+func (ui *UI) SetCollapsePath(value bool) {
+}
+
+// ListDevices lists mounted devices and shows their disk usage
+func (ui *UI) ListDevices(getter device.DevicesInfoGetter) error {
+ return errors.New("exporting devices list is not supported")
+}
+
+// ReadAnalysis reads analysis report from JSON file
+func (ui *UI) ReadAnalysis(input io.Reader) error {
+ return errors.New("reading analysis is not possible while exporting")
+}
+
+// ReadFromStorage reads analysis data from persistent key-value storage
+func (ui *UI) ReadFromStorage(storagePath, path string) error {
+ storage := analyze.NewStorage(storagePath, path)
+ closeFn := storage.Open()
+ defer closeFn()
+
+ dir, err := storage.GetDirForPath(path)
+ if err != nil {
+ return err
+ }
+
+ var waitWritten sync.WaitGroup
+ if ui.ShowProgress {
+ waitWritten.Add(1)
+ go func() {
+ defer waitWritten.Done()
+ ui.updateProgress()
+ }()
+ }
+
+ return ui.exportDir(dir, &waitWritten)
+}
+
+// AnalyzePath analyzes recursively disk usage in given path
+func (ui *UI) AnalyzePath(path string, _ fs.Item) error {
+ var (
+ dir fs.Item
+ wait sync.WaitGroup
+ waitWritten sync.WaitGroup
+ )
+
+ if ui.ShowProgress {
+ waitWritten.Add(1)
+ go func() {
+ defer waitWritten.Done()
+ ui.updateProgress()
+ }()
+ }
+
+ wait.Add(1)
+ go func() {
+ defer wait.Done()
+ dir = ui.Analyzer.AnalyzeDir(path, ui.CreateIgnoreFunc(), ui.CreateFileTypeFilter())
+ dir.UpdateStats(make(fs.HardLinkedItems, 10))
+ }()
+
+ wait.Wait()
+
+ return ui.exportDir(dir, &waitWritten)
+}
+
+func (ui *UI) exportDir(dir fs.Item, waitWritten *sync.WaitGroup) error {
+ // Sorting is now handled by GetFiles with sort parameters
+
+ var (
+ buff bytes.Buffer
+ err error
+ )
+
+ buff.Write([]byte(`[1,2,{"progname":"gdu","progver":"`))
+ buff.Write([]byte(build.Version))
+ buff.Write([]byte(`","timestamp":`))
+ buff.Write([]byte(strconv.FormatInt(time.Now().Unix(), 10)))
+ buff.Write([]byte("},\n"))
+
+ if err := dir.EncodeJSON(&buff, true); err != nil {
+ return err
+ }
+ if _, err = buff.Write([]byte("]\n")); err != nil {
+ return err
+ }
+ if _, err = buff.WriteTo(ui.exportOutput); err != nil {
+ return err
+ }
+
+ if f, ok := ui.exportOutput.(*os.File); ok {
+ err = f.Close()
+ if err != nil {
+ return err
+ }
+ }
+
+ if ui.ShowProgress {
+ ui.writtenChan <- struct{}{}
+ waitWritten.Wait()
+ }
+
+ return nil
+}
+
+func (ui *UI) updateProgress() {
+ waitingForWrite := false
+
+ emptyRow := "\r"
+ for j := 0; j < 100; j++ {
+ emptyRow += " "
+ }
+
+ progressRunes := []rune(`⠇⠏⠋⠙⠹⠸⠼⠴⠦⠧`)
+
+ progressChan := ui.Analyzer.GetProgressChan()
+ doneChan := ui.Analyzer.GetDone()
+
+ var progress common.CurrentProgress
+
+ i := 0
+ for {
+ fmt.Fprint(ui.output, emptyRow)
+
+ select {
+ case progress = <-progressChan:
+ case <-doneChan:
+ fmt.Fprint(ui.output, "\r")
+ waitingForWrite = true
+ case <-ui.writtenChan:
+ fmt.Fprint(ui.output, "\r")
+ return
+ default:
+ }
+
+ fmt.Fprintf(ui.output, "\r %s ", string(progressRunes[i]))
+
+ if waitingForWrite {
+ fmt.Fprint(ui.output, "Writing output file...")
+ } else {
+ fmt.Fprint(ui.output, "Scanning... Total items: "+
+ ui.red.Sprint(common.FormatNumber(int64(progress.ItemCount)))+
+ " size: "+
+ ui.formatSize(progress.TotalSize))
+ }
+
+ time.Sleep(100 * time.Millisecond)
+ i++
+ i %= 10
+ }
+}
+
+func (ui *UI) formatSize(size int64) string {
+ if ui.UseSIPrefix {
+ return ui.formatWithDecPrefix(size)
+ }
+ return ui.formatWithBinPrefix(size)
+}
+
+func (ui *UI) formatWithBinPrefix(size int64) string {
+ fsize := float64(size)
+ asize := math.Abs(fsize)
+
+ switch {
+ case asize >= common.Ei:
+ return ui.orange.Sprintf("%.1f", fsize/common.Ei) + " EiB"
+ case asize >= common.Pi:
+ return ui.orange.Sprintf("%.1f", fsize/common.Pi) + " PiB"
+ case asize >= common.Ti:
+ return ui.orange.Sprintf("%.1f", fsize/common.Ti) + " TiB"
+ case asize >= common.Gi:
+ return ui.orange.Sprintf("%.1f", fsize/common.Gi) + " GiB"
+ case asize >= common.Mi:
+ return ui.orange.Sprintf("%.1f", fsize/common.Mi) + " MiB"
+ case asize >= common.Ki:
+ return ui.orange.Sprintf("%.1f", fsize/common.Ki) + " KiB"
+ default:
+ return ui.orange.Sprintf("%d", size) + " B"
+ }
+}
+
+func (ui *UI) formatWithDecPrefix(size int64) string {
+ fsize := float64(size)
+ asize := math.Abs(fsize)
+
+ switch {
+ case asize >= common.E:
+ return ui.orange.Sprintf("%.1f", fsize/common.E) + " EB"
+ case asize >= common.P:
+ return ui.orange.Sprintf("%.1f", fsize/common.P) + " PB"
+ case asize >= common.T:
+ return ui.orange.Sprintf("%.1f", fsize/common.T) + " TB"
+ case asize >= common.G:
+ return ui.orange.Sprintf("%.1f", fsize/common.G) + " GB"
+ case asize >= common.M:
+ return ui.orange.Sprintf("%.1f", fsize/common.M) + " MB"
+ case asize >= common.K:
+ return ui.orange.Sprintf("%.1f", fsize/common.K) + " kB"
+ default:
+ return ui.orange.Sprintf("%d", size) + " B"
+ }
+}
--- /dev/null
+//go:build linux
+
+package report
+
+import (
+ "bytes"
+ "os"
+ "testing"
+
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestReadFromStorage(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ const storagePath = "/tmp/badger-test2"
+ defer func() {
+ err := os.RemoveAll(storagePath)
+ if err != nil {
+ panic(err)
+ }
+ }()
+
+ output := bytes.NewBuffer(make([]byte, 10))
+ reportOutput := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateExportUI(output, reportOutput, false, true, false)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ ui.SetAnalyzer(analyze.CreateStoredAnalyzer(storagePath))
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+ err = ui.ReadFromStorage(storagePath, "test_dir")
+
+ assert.Nil(t, err)
+ assert.Contains(t, reportOutput.String(), `"name":"nested"`)
+}
+
+func TestReadFromStorageWithErr(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ const storagePath = "/tmp/badger-test3"
+
+ output := bytes.NewBuffer(make([]byte, 10))
+ reportOutput := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateExportUI(output, reportOutput, false, false, false)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.ReadFromStorage(storagePath, "test_dir")
+
+ assert.ErrorContains(t, err, "Key not found")
+}
--- /dev/null
+package report
+
+import (
+ "bytes"
+ "os"
+ "testing"
+
+ log "github.com/sirupsen/logrus"
+
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/device"
+ "github.com/stretchr/testify/assert"
+)
+
+func init() {
+ log.SetLevel(log.WarnLevel)
+}
+
+func TestAnalyzePath(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ output := bytes.NewBuffer(make([]byte, 10))
+ reportOutput := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateExportUI(output, reportOutput, false, false, false)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+ err = ui.StartUILoop()
+
+ assert.Nil(t, err)
+ assert.Contains(t, reportOutput.String(), `"name":"nested"`)
+}
+
+func TestAnalyzePathWithProgress(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ output := bytes.NewBuffer(make([]byte, 10))
+ reportOutput := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateExportUI(output, reportOutput, true, true, true)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+ err = ui.StartUILoop()
+
+ assert.Nil(t, err)
+ assert.Contains(t, reportOutput.String(), `"name":"nested"`)
+}
+
+func TestShowDevices(t *testing.T) {
+ output := bytes.NewBuffer(make([]byte, 10))
+ reportOutput := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateExportUI(output, reportOutput, false, true, false)
+ err := ui.ListDevices(device.Getter)
+
+ assert.Contains(t, err.Error(), "not supported")
+}
+
+func TestReadAnalysisWhileExporting(t *testing.T) {
+ output := bytes.NewBuffer(make([]byte, 10))
+ reportOutput := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateExportUI(output, reportOutput, false, true, false)
+ err := ui.ReadAnalysis(output)
+
+ assert.Contains(t, err.Error(), "not possible while exporting")
+}
+
+func TestExportToFile(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ reportOutput, err := os.OpenFile("output.json", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
+ assert.Nil(t, err)
+ defer func() {
+ os.Remove("output.json")
+ }()
+
+ output := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateExportUI(output, reportOutput, false, true, false)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err = ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+ err = ui.StartUILoop()
+ assert.Nil(t, err)
+
+ reportOutput, err = os.OpenFile("output.json", os.O_RDONLY, 0o644)
+ assert.Nil(t, err)
+ _, err = reportOutput.Seek(0, 0)
+ assert.Nil(t, err)
+ buff := make([]byte, 200)
+ _, err = reportOutput.Read(buff)
+ assert.Nil(t, err)
+
+ assert.Contains(t, string(buff), `"name":"nested"`)
+}
+
+func TestFormatSize(t *testing.T) {
+ output := bytes.NewBuffer(make([]byte, 10))
+ reportOutput := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateExportUI(output, reportOutput, false, true, false)
+
+ assert.Contains(t, ui.formatSize(1), "B")
+ assert.Contains(t, ui.formatSize(1<<10+1), "KiB")
+ assert.Contains(t, ui.formatSize(1<<20+1), "MiB")
+ assert.Contains(t, ui.formatSize(1<<30+1), "GiB")
+ assert.Contains(t, ui.formatSize(1<<40+1), "TiB")
+ assert.Contains(t, ui.formatSize(1<<50+1), "PiB")
+ assert.Contains(t, ui.formatSize(1<<60+1), "EiB")
+ assert.Contains(t, ui.formatSize(-1<<10-1), "KiB")
+}
+
+func TestFormatSizeDec(t *testing.T) {
+ output := bytes.NewBuffer(make([]byte, 10))
+ reportOutput := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateExportUI(output, reportOutput, false, true, true)
+
+ assert.Contains(t, ui.formatSize(1), "B")
+ assert.Contains(t, ui.formatSize(1<<10+1), "kB")
+ assert.Contains(t, ui.formatSize(1<<20+1), "MB")
+ assert.Contains(t, ui.formatSize(1<<30+1), "GB")
+ assert.Contains(t, ui.formatSize(1<<40+1), "TB")
+ assert.Contains(t, ui.formatSize(1<<50+1), "PB")
+ assert.Contains(t, ui.formatSize(1<<60+1), "EB")
+ assert.Contains(t, ui.formatSize(-1<<10-1), "kB")
+}
--- /dev/null
+package report
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/dundee/gdu/v5/pkg/analyze"
+)
+
+// ReadAnalysis reads analysis report from JSON file and returns directory item
+func ReadAnalysis(input io.Reader) (dir *analyze.Dir, err error) {
+ var data interface{}
+
+ var buff bytes.Buffer
+ if _, err = buff.ReadFrom(input); err != nil {
+ return nil, err
+ }
+ if err := json.Unmarshal(buff.Bytes(), &data); err != nil {
+ return nil, err
+ }
+
+ dataArray, ok := data.([]interface{})
+ if !ok {
+ return nil, errors.New("JSON file does not contain top level array")
+ }
+ if len(dataArray) < 4 {
+ return nil, errors.New("top level array must have at least 4 items")
+ }
+
+ items, ok := dataArray[3].([]interface{})
+ if !ok {
+ return nil, errors.New("array of maps not found in the top level array on 4th position")
+ }
+
+ return processDir(items)
+}
+
+func processDir(items []interface{}) (dir *analyze.Dir, err error) {
+ dir = &analyze.Dir{
+ File: &analyze.File{
+ Flag: ' ',
+ },
+ }
+ dirMap, ok := items[0].(map[string]interface{})
+ if !ok {
+ return nil, errors.New("directory item is not a map")
+ }
+ name, ok := dirMap["name"].(string)
+ if !ok {
+ return nil, errors.New("directory name is not a string")
+ }
+ if mtime, ok := dirMap["mtime"].(float64); ok {
+ dir.Mtime = time.Unix(int64(mtime), 0)
+ }
+
+ slashPos := strings.LastIndex(name, "/")
+ if slashPos > -1 {
+ dir.Name = name[slashPos+1:]
+ dir.BasePath = name[:slashPos+1]
+ } else {
+ dir.Name = name
+ }
+
+ for _, v := range items[1:] {
+ switch item := v.(type) {
+ case map[string]interface{}:
+ file := &analyze.File{}
+ file.Name = item["name"].(string)
+
+ if asize, ok := item["asize"].(float64); ok {
+ file.Size = int64(asize)
+ }
+ if dsize, ok := item["dsize"].(float64); ok {
+ file.Usage = int64(dsize)
+ }
+ if mtime, ok := item["mtime"].(float64); ok {
+ file.Mtime = time.Unix(int64(mtime), 0)
+ }
+ if _, ok := item["notreg"].(bool); ok {
+ file.Flag = '@'
+ } else {
+ file.Flag = ' '
+ }
+ if mli, ok := item["ino"].(float64); ok {
+ file.Mli = uint64(mli)
+ }
+ if _, ok := item["hlnkc"].(bool); ok {
+ file.Flag = 'H'
+ }
+
+ file.Parent = dir
+
+ dir.AddFile(file)
+ case []interface{}:
+ subdir, err := processDir(item)
+ if err != nil {
+ return nil, err
+ }
+ subdir.Parent = dir
+ dir.AddFile(subdir)
+ }
+ }
+
+ return dir, nil
+}
--- /dev/null
+package report
+
+import (
+ "bytes"
+ "errors"
+ "testing"
+
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ log "github.com/sirupsen/logrus"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func init() {
+ log.SetLevel(log.WarnLevel)
+}
+
+func TestReadAnalysis(t *testing.T) {
+ buff := bytes.NewBuffer([]byte(`
+ [1,2,{"progname":"gdu","progver":"development","timestamp":1626806293},
+ [{"name":"/home/xxx","mtime":1629333600},
+ {"name":"gdu.json","asize":33805233,"dsize":33808384},
+ {"name":"sock","notreg":true},
+ [{"name":"app"},
+ {"name":"app.go","asize":4638,"dsize":8192},
+ {"name":"app_linux_test.go","asize":1410,"dsize":4096},
+ {"name":"app_linux_test2.go","ino":1234,"hlnkc":true,"asize":1410,"dsize":4096},
+ {"name":"app_test.go","asize":4974,"dsize":8192}],
+ {"name":"main.go","asize":3205,"dsize":4096,"mtime":1629333600}]]
+ `))
+
+ dir, err := ReadAnalysis(buff)
+
+ assert.Nil(t, err)
+ assert.Equal(t, "xxx", dir.GetName())
+ assert.Equal(t, "/home/xxx", dir.GetPath())
+ assert.Equal(t, 2021, dir.GetMtime().Year())
+ assert.Equal(t, 2021, dir.Files[3].GetMtime().Year())
+ alt2 := dir.Files[2].(*analyze.Dir).Files[2].(*analyze.File)
+ assert.Equal(t, "app_linux_test2.go", alt2.Name)
+ assert.Equal(t, uint64(1234), alt2.Mli)
+ assert.Equal(t, 'H', alt2.Flag)
+}
+
+func TestReadAnalysisWithEmptyInput(t *testing.T) {
+ buff := bytes.NewBuffer([]byte(``))
+
+ _, err := ReadAnalysis(buff)
+
+ assert.Equal(t, "unexpected end of JSON input", err.Error())
+}
+
+func TestReadAnalysisWithEmptyDict(t *testing.T) {
+ buff := bytes.NewBuffer([]byte(`{}`))
+
+ _, err := ReadAnalysis(buff)
+
+ assert.Equal(t, "JSON file does not contain top level array", err.Error())
+}
+
+func TestReadFromBrokenInput(t *testing.T) {
+ _, err := ReadAnalysis(&BrokenInput{})
+
+ assert.Equal(t, "IO error", err.Error())
+}
+
+func TestReadAnalysisWithEmptyArray(t *testing.T) {
+ buff := bytes.NewBuffer([]byte(`[]`))
+
+ _, err := ReadAnalysis(buff)
+
+ assert.Equal(t, "top level array must have at least 4 items", err.Error())
+}
+
+func TestReadAnalysisWithWrongContent(t *testing.T) {
+ buff := bytes.NewBuffer([]byte(`[1,2,3,4]`))
+
+ _, err := ReadAnalysis(buff)
+
+ assert.Equal(t, "array of maps not found in the top level array on 4th position", err.Error())
+}
+
+func TestReadAnalysisWithEmptyDirContent(t *testing.T) {
+ buff := bytes.NewBuffer([]byte(`[1,2,3,[{}]]`))
+
+ _, err := ReadAnalysis(buff)
+
+ assert.Equal(t, "directory name is not a string", err.Error())
+}
+
+func TestReadAnalysisWithWrongDirItem(t *testing.T) {
+ buff := bytes.NewBuffer([]byte(`[1,2,3,[1, 2, 3]]`))
+
+ _, err := ReadAnalysis(buff)
+
+ assert.Equal(t, "directory item is not a map", err.Error())
+}
+
+func TestReadAnalysisWithWrongSubdirItem(t *testing.T) {
+ buff := bytes.NewBuffer([]byte(`[1,2,3,[{"name":"xxx"}, [1,2,3]]]`))
+
+ _, err := ReadAnalysis(buff)
+
+ assert.Equal(t, "directory item is not a map", err.Error())
+}
+
+type BrokenInput struct{}
+
+func (i *BrokenInput) Read(p []byte) (n int, err error) {
+ return 0, errors.New("IO error")
+}
--- /dev/null
+name: gdu-disk-usage-analyzer
+version: git
+summary: Pretty fast disk usage analyzer written in Go.
+description: |
+ Gdu is intended primarily for SSD disks where it can fully utilize parallel processing.
+ However HDDs work as well, but the performance gain is not so huge.
+confinement: strict
+base: core20
+parts:
+ gdu:
+ plugin: go
+ source: .
+ override-build: |
+ GO111MODULE=on CGO_ENABLED=0 go build \
+ -buildmode=pie -trimpath -mod=readonly -modcacherw \
+ -ldflags \
+ "-s -w \
+ -X 'github.com/dundee/gdu/v5/build.Version=$(git describe)' \
+ -X 'github.com/dundee/gdu/v5/build.User=$(id -u -n)' \
+ -X 'github.com/dundee/gdu/v5/build.Time=$(LC_ALL=en_US.UTF-8 date)' \
+ -X 'github.com/dundee/gdu/v5/build.RootPathPrefix=/var/lib/snapd/hostfs'" \
+ -o $SNAPCRAFT_PART_INSTALL/gdu \
+ github.com/dundee/gdu/v5/cmd/gdu
+ $SNAPCRAFT_PART_INSTALL/gdu -v
+apps:
+ gdu:
+ command: gdu
+ plugs:
+ - mount-observe
+ - system-backup
--- /dev/null
+package stdout
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "runtime"
+ "sync"
+ "time"
+
+ "github.com/dundee/gdu/v5/internal/common"
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/dundee/gdu/v5/pkg/device"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/dundee/gdu/v5/report"
+ "github.com/fatih/color"
+)
+
+// UI struct
+type UI struct {
+ output io.Writer
+ *common.UI
+ red *color.Color
+ orange *color.Color
+ blue *color.Color
+ showItemCnt bool
+ top int
+ depth int
+ summarize bool
+ noPrefix bool
+ fixedBase float64
+ fixedSuffix string
+ reverseSort bool
+}
+
+var (
+ progressRunes = []rune(`⠇⠏⠋⠙⠹⠸⠼⠴⠦⠧`)
+ progressRunesOld = []rune(`-\\|/`)
+ progressRunesCount = len(progressRunes)
+)
+
+// CreateStdoutUI creates UI for stdout
+func CreateStdoutUI(
+ output io.Writer,
+ useColors bool,
+ showProgress bool,
+ showApparentSize bool,
+ showRelativeSize bool,
+ summarize bool,
+ useSIPrefix bool,
+ noPrefix bool,
+ fixedUnit string,
+ top int,
+ reverseSort bool,
+ depth int,
+) *UI {
+ ui := &UI{
+ UI: &common.UI{
+ UseColors: useColors,
+ ShowProgress: showProgress,
+ ShowApparentSize: showApparentSize,
+ ShowRelativeSize: showRelativeSize,
+ Analyzer: analyze.CreateAnalyzer(),
+ UseSIPrefix: useSIPrefix,
+ },
+ output: output,
+ summarize: summarize,
+ noPrefix: noPrefix,
+ top: top,
+ reverseSort: reverseSort,
+ depth: depth,
+ }
+ if fixedUnit != "" {
+ ui.SetFixedUnit(fixedUnit)
+ }
+ ui.red = color.New(color.FgRed).Add(color.Bold)
+ ui.orange = color.New(color.FgYellow).Add(color.Bold)
+ ui.blue = color.New(color.FgBlue).Add(color.Bold)
+
+ if !useColors {
+ color.NoColor = true
+ }
+
+ return ui
+}
+func (ui *UI) SetFixedUnit(unitChar string) {
+ k, m, g := common.Ki, common.Mi, common.Gi
+ suffixMap := map[string]string{"k": " KiB", "m": " MiB", "g": " GiB"}
+
+ if ui.UseSIPrefix {
+ k, m, g = common.K, common.M, common.G
+ suffixMap = map[string]string{"k": " kB", "m": " MB", "g": " GB"}
+ }
+
+ switch unitChar {
+ case "k":
+ ui.fixedBase = k
+ ui.fixedSuffix = suffixMap["k"]
+ case "m":
+ ui.fixedBase = m
+ ui.fixedSuffix = suffixMap["m"]
+ case "g":
+ ui.fixedBase = g
+ ui.fixedSuffix = suffixMap["g"]
+ }
+}
+
+func (ui *UI) SetShowItemCount() {
+ ui.showItemCnt = true
+}
+
+func (ui *UI) UseOldProgressRunes() {
+ progressRunes = progressRunesOld
+ progressRunesCount = len(progressRunes)
+}
+
+// StartUILoop stub
+func (ui *UI) StartUILoop() error {
+ return nil
+}
+
+// SetCollapsePath sets the flag to collapse paths
+func (ui *UI) SetCollapsePath(value bool) {
+}
+
+// ListDevices lists mounted devices and shows their disk usage
+func (ui *UI) ListDevices(getter device.DevicesInfoGetter) error {
+ devices, err := getter.GetDevicesInfo()
+ if err != nil {
+ return err
+ }
+
+ maxDeviceNameLength := maxInt(maxLength(
+ devices,
+ func(device *device.Device) string { return device.Name },
+ ), len("Devices"))
+
+ var sizeLength, percentLength int
+ if ui.UseColors {
+ sizeLength = 20
+ percentLength = 16
+ } else {
+ sizeLength = 9
+ percentLength = 5
+ }
+
+ lineFormat := fmt.Sprintf(
+ "%%%ds %%%ds %%%ds %%%ds %%%ds %%s\n",
+ maxDeviceNameLength,
+ sizeLength,
+ sizeLength,
+ sizeLength,
+ percentLength,
+ )
+
+ fmt.Fprintf(
+ ui.output,
+ fmt.Sprintf("%%%ds %%9s %%9s %%9s %%5s %%s\n", maxDeviceNameLength),
+ "Device",
+ "Size",
+ "Used",
+ "Free",
+ "Used%",
+ "Mount point",
+ )
+
+ for _, device := range devices {
+ usedPercent := math.Round(float64(device.Size-device.Free) / float64(device.Size) * 100)
+
+ fmt.Fprintf(
+ ui.output,
+ lineFormat,
+ device.Name,
+ ui.formatSize(device.Size),
+ ui.formatSize(device.Size-device.Free),
+ ui.formatSize(device.Free),
+ ui.red.Sprintf("%.f%%", usedPercent),
+ device.MountPoint)
+ }
+
+ return nil
+}
+
+// AnalyzePath analyzes recursively disk usage in given path
+func (ui *UI) AnalyzePath(path string, _ fs.Item) error {
+ var (
+ dir fs.Item
+ wait sync.WaitGroup
+ updateStatsDone chan struct{}
+ )
+ updateStatsDone = make(chan struct{}, 1)
+
+ if ui.ShowProgress {
+ wait.Add(1)
+ go func() {
+ defer wait.Done()
+ ui.updateProgress(updateStatsDone)
+ }()
+ }
+
+ wait.Add(1)
+ go func() {
+ defer wait.Done()
+ dir = ui.Analyzer.AnalyzeDir(path, ui.CreateIgnoreFunc(), ui.CreateFileTypeFilter())
+ dir.UpdateStats(make(fs.HardLinkedItems, 10))
+ updateStatsDone <- struct{}{}
+ }()
+
+ wait.Wait()
+
+ switch {
+ case ui.top > 0:
+ ui.printTopFiles(dir)
+ case ui.depth > 0:
+ ui.printDirWithDepth(dir, 0)
+ case ui.summarize:
+ ui.printTotalItem(dir)
+ default:
+ ui.showDir(dir)
+ }
+
+ return nil
+}
+
+// ReadFromStorage reads analysis data from persistent key-value storage
+func (ui *UI) ReadFromStorage(storagePath, path string) error {
+ storage := analyze.NewStorage(storagePath, path)
+ closeFn := storage.Open()
+ defer closeFn()
+
+ dir, err := storage.GetDirForPath(path)
+ if err != nil {
+ return err
+ }
+
+ switch {
+ case ui.top > 0:
+ ui.printTopFiles(dir)
+ case ui.summarize:
+ ui.printTotalItem(dir)
+ default:
+ ui.showDir(dir)
+ }
+ return nil
+}
+
+func (ui *UI) showDir(dir fs.Item) {
+ sortOrder := fs.SortDesc
+ if ui.reverseSort {
+ sortOrder = fs.SortAsc
+ }
+
+ for file := range dir.GetFiles(fs.SortBySize, sortOrder) {
+ ui.printItem(file)
+ }
+}
+
+func (ui *UI) printTopFiles(file fs.Item) {
+ collected := analyze.CollectTopFiles(file, ui.top)
+ for _, file := range collected {
+ ui.printItemPath(file)
+ }
+}
+
+func (ui *UI) printTotalItem(file fs.Item) {
+ var lineFormat string
+ if ui.UseColors {
+ lineFormat = "%20s %s\n"
+ } else {
+ lineFormat = "%9s %s\n"
+ }
+
+ var size int64
+ if ui.ShowApparentSize {
+ size = file.GetSize()
+ } else {
+ size = file.GetUsage()
+ }
+
+ fmt.Fprintf(
+ ui.output,
+ lineFormat,
+ ui.formatSize(size),
+ file.GetName(),
+ )
+}
+
+func (ui *UI) printItem(file fs.Item) {
+ var lineFormat string
+ if ui.showItemCnt {
+ if ui.UseColors {
+ lineFormat = "%s %23s %25s %s\n"
+ } else {
+ lineFormat = "%s %9s %11s %s\n"
+ }
+ } else {
+ if ui.UseColors {
+ lineFormat = "%s %23s %s\n"
+ } else {
+ lineFormat = "%s %9s %s\n"
+ }
+ }
+
+ var size int64
+ if ui.ShowApparentSize {
+ size = file.GetSize()
+ } else {
+ size = file.GetUsage()
+ }
+
+ countToDisplay := file.GetItemCount()
+ if file.IsDir() {
+ countToDisplay--
+ }
+
+ name := file.GetName()
+ if file.IsDir() {
+ name = ui.blue.Sprint("/" + file.GetName())
+ }
+
+ if ui.showItemCnt {
+ fmt.Fprintf(
+ ui.output,
+ lineFormat,
+ string(file.GetFlag()),
+ ui.formatSize(size),
+ ui.formatCount(countToDisplay),
+ name,
+ )
+ return
+ }
+
+ fmt.Fprintf(
+ ui.output,
+ lineFormat,
+ string(file.GetFlag()),
+ ui.formatSize(size),
+ name,
+ )
+}
+
+func (ui *UI) printItemPath(file fs.Item) {
+ var lineFormat string
+ if ui.UseColors {
+ lineFormat = "%20s %s\n"
+ } else {
+ lineFormat = "%9s %s\n"
+ }
+
+ var size int64
+ if ui.ShowApparentSize {
+ size = file.GetSize()
+ } else {
+ size = file.GetUsage()
+ }
+
+ if file.IsDir() {
+ fmt.Fprintf(ui.output,
+ lineFormat,
+ ui.formatSize(size),
+ ui.blue.Sprint(file.GetPath()))
+ } else {
+ fmt.Fprintf(ui.output,
+ lineFormat,
+ ui.formatSize(size),
+ file.GetPath())
+ }
+}
+
+func (ui *UI) printDirWithDepth(dir fs.Item, currentDepth int) {
+ // Print current directory
+ ui.printItemPath(dir)
+
+ // If we haven't reached the max depth, print contents
+ if currentDepth < ui.depth && dir.IsDir() {
+ sortOrder := fs.SortDesc
+ if ui.reverseSort {
+ sortOrder = fs.SortAsc
+ }
+
+ files := dir.GetFiles(fs.SortBySize, sortOrder)
+
+ // Print all files at this depth level
+ for file := range files {
+ if file.IsDir() {
+ // Recurse into subdirectories
+ ui.printDirWithDepth(file, currentDepth+1)
+ } else {
+ // Print regular files
+ ui.printItemPath(file)
+ }
+ }
+ }
+}
+
+// ReadAnalysis reads analysis report from JSON file
+func (ui *UI) ReadAnalysis(input io.Reader) error {
+ var (
+ dir fs.Item
+ wait sync.WaitGroup
+ err error
+ doneChan chan struct{}
+ )
+
+ if ui.ShowProgress {
+ wait.Add(1)
+ doneChan = make(chan struct{})
+ go func() {
+ defer wait.Done()
+ ui.showReadingProgress(doneChan)
+ }()
+ }
+
+ wait.Add(1)
+ go func() {
+ defer wait.Done()
+ dir, err = report.ReadAnalysis(input)
+ if err != nil {
+ if ui.ShowProgress {
+ doneChan <- struct{}{}
+ }
+ return
+ }
+ runtime.GC()
+
+ dir.UpdateStats(make(fs.HardLinkedItems, 10))
+
+ if ui.ShowProgress {
+ doneChan <- struct{}{}
+ }
+ }()
+
+ wait.Wait()
+
+ if err != nil {
+ return err
+ }
+
+ if ui.summarize {
+ ui.printTotalItem(dir)
+ } else {
+ ui.showDir(dir)
+ }
+
+ return nil
+}
+
+func (ui *UI) showReadingProgress(doneChan chan struct{}) {
+ emptyRow := "\r"
+ for j := 0; j < 40; j++ {
+ emptyRow += " "
+ }
+
+ i := 0
+ for {
+ fmt.Fprint(ui.output, emptyRow)
+
+ select {
+ case <-doneChan:
+ fmt.Fprint(ui.output, "\r")
+ return
+ default:
+ }
+
+ fmt.Fprintf(ui.output, "\r %s ", string(progressRunes[i]))
+ fmt.Fprint(ui.output, "Reading analysis from file...")
+
+ time.Sleep(100 * time.Millisecond)
+ i++
+ i %= progressRunesCount
+ }
+}
+
+func (ui *UI) updateProgress(updateStatsDone <-chan struct{}) {
+ emptyRow := "\r"
+ for j := 0; j < 100; j++ {
+ emptyRow += " "
+ }
+
+ progressChan := ui.Analyzer.GetProgressChan()
+ analysisDoneChan := ui.Analyzer.GetDone()
+ ticker := time.NewTicker(100 * time.Millisecond)
+ defer ticker.Stop()
+
+ var progress common.CurrentProgress
+
+ i := 0
+ for {
+ select {
+ case <-ticker.C:
+ select {
+ case progress = <-progressChan:
+ fmt.Fprint(ui.output, emptyRow)
+ fmt.Fprintf(ui.output, "\r %s ", string(progressRunes[i]))
+ fmt.Fprint(ui.output, "Scanning... Total items: "+
+ ui.red.Sprint(common.FormatNumber(int64(progress.ItemCount)))+
+ " size: "+
+ ui.formatSize(progress.TotalSize))
+ default:
+ // Update only the spinner without clearing the line
+ fmt.Fprintf(ui.output, "\r %s ", string(progressRunes[i]))
+ }
+ i++
+ i %= progressRunesCount
+ case <-analysisDoneChan:
+ ticker.Stop()
+ fmt.Fprint(ui.output, emptyRow)
+ for {
+ fmt.Fprint(ui.output, emptyRow)
+ fmt.Fprintf(ui.output, "\r %s ", string(progressRunes[i]))
+ fmt.Fprint(ui.output, "Calculating disk usage...")
+ time.Sleep(100 * time.Millisecond)
+ i++
+ i %= progressRunesCount
+
+ select {
+ case <-updateStatsDone:
+ fmt.Fprint(ui.output, emptyRow)
+ fmt.Fprint(ui.output, "\r")
+ return
+ default:
+ }
+ }
+ }
+ }
+}
+
+func (ui *UI) formatCount(count int64) string {
+ count64 := float64(count)
+
+ switch {
+ case count64 >= common.G:
+ return ui.red.Sprintf("%.1f", float64(count)/float64(common.G)) + "G"
+ case count64 >= common.M:
+ return ui.red.Sprintf("%.1f", float64(count)/float64(common.M)) + "M"
+ case count64 >= common.K:
+ return ui.red.Sprintf("%.1f", float64(count)/float64(common.K)) + "k"
+ default:
+ return ui.red.Sprintf("%d", count)
+ }
+}
+
+func (ui *UI) formatSize(size int64) string {
+ if ui.noPrefix {
+ return ui.orange.Sprintf("%d", size)
+ }
+ if ui.fixedBase > 0 {
+ val := float64(size) / ui.fixedBase
+ return ui.orange.Sprintf("%.1f", val) + ui.fixedSuffix
+ }
+ if ui.UseSIPrefix {
+ return ui.formatWithDecPrefix(size)
+ }
+ return ui.formatWithBinPrefix(size)
+}
+
+func (ui *UI) formatWithBinPrefix(size int64) string {
+ fsize := float64(size)
+ asize := math.Abs(fsize)
+
+ switch {
+ case asize >= common.Ei:
+ return ui.orange.Sprintf("%.1f", fsize/common.Ei) + " EiB"
+ case asize >= common.Pi:
+ return ui.orange.Sprintf("%.1f", fsize/common.Pi) + " PiB"
+ case asize >= common.Ti:
+ return ui.orange.Sprintf("%.1f", fsize/common.Ti) + " TiB"
+ case asize >= common.Gi:
+ return ui.orange.Sprintf("%.1f", fsize/common.Gi) + " GiB"
+ case asize >= common.Mi:
+ return ui.orange.Sprintf("%.1f", fsize/common.Mi) + " MiB"
+ case asize >= common.Ki:
+ return ui.orange.Sprintf("%.1f", fsize/common.Ki) + " KiB"
+ default:
+ return ui.orange.Sprintf("%d", size) + " B"
+ }
+}
+
+func (ui *UI) formatWithDecPrefix(size int64) string {
+ fsize := float64(size)
+ asize := math.Abs(fsize)
+
+ switch {
+ case asize >= common.E:
+ return ui.orange.Sprintf("%.1f", fsize/common.E) + " EB"
+ case asize >= common.P:
+ return ui.orange.Sprintf("%.1f", fsize/common.P) + " PB"
+ case asize >= common.T:
+ return ui.orange.Sprintf("%.1f", fsize/common.T) + " TB"
+ case asize >= common.G:
+ return ui.orange.Sprintf("%.1f", fsize/common.G) + " GB"
+ case asize >= common.M:
+ return ui.orange.Sprintf("%.1f", fsize/common.M) + " MB"
+ case asize >= common.K:
+ return ui.orange.Sprintf("%.1f", fsize/common.K) + " kB"
+ default:
+ return ui.orange.Sprintf("%d", size) + " B"
+ }
+}
+
+func maxLength(list []*device.Device, keyGetter func(*device.Device) string) int {
+ maxLen := 0
+ var s string
+ for _, item := range list {
+ s = keyGetter(item)
+ if len(s) > maxLen {
+ maxLen = len(s)
+ }
+ }
+ return maxLen
+}
+
+func maxInt(x, y int) int {
+ if x > y {
+ return x
+ }
+ return y
+}
--- /dev/null
+//go:build linux
+
+package stdout
+
+import (
+ "bytes"
+ "testing"
+
+ log "github.com/sirupsen/logrus"
+
+ "github.com/dundee/gdu/v5/pkg/device"
+ "github.com/stretchr/testify/assert"
+)
+
+func init() {
+ log.SetLevel(log.WarnLevel)
+}
+
+func TestShowDevicesWithErr(t *testing.T) {
+ output := bytes.NewBuffer(make([]byte, 10))
+
+ getter := device.LinuxDevicesInfoGetter{MountsPath: "/xyzxyz"}
+ ui := CreateStdoutUI(output, false, true, false, false, false, false, false, "", 0, false, 0)
+ err := ui.ListDevices(getter)
+
+ assert.Contains(t, err.Error(), "no such file")
+}
--- /dev/null
+package stdout
+
+import (
+ "bytes"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "testing"
+
+ log "github.com/sirupsen/logrus"
+
+ "github.com/dundee/gdu/v5/internal/testanalyze"
+ "github.com/dundee/gdu/v5/internal/testdev"
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/device"
+ "github.com/stretchr/testify/assert"
+)
+
+func init() {
+ log.SetLevel(log.WarnLevel)
+}
+
+func TestAnalyzePath(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ buff := make([]byte, 10)
+ output := bytes.NewBuffer(buff)
+
+ ui := CreateStdoutUI(output, false, false, false, false, false, false, false, "", 0, false, 0)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+ err = ui.StartUILoop()
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "nested")
+}
+
+func TestShowItemCountInNonInteractiveMode(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ for dirName, fileCount := range map[string]int{"a": 5, "b": 10, "c": 15} {
+ dirPath := filepath.Join(tmpDir, dirName)
+ err := os.Mkdir(dirPath, 0o755)
+ assert.Nil(t, err)
+
+ for i := 0; i < fileCount; i++ {
+ filePath := filepath.Join(dirPath, "f"+string(rune('a'+i)))
+ err = os.WriteFile(filePath, []byte("x"), 0o644)
+ assert.Nil(t, err)
+ }
+ }
+
+ output := bytes.NewBuffer(make([]byte, 10))
+ ui := CreateStdoutUI(output, false, false, false, false, false, false, false, "", 0, false, 0)
+ ui.SetShowItemCount()
+
+ err := ui.AnalyzePath(tmpDir, nil)
+ assert.Nil(t, err)
+
+ out := output.String()
+ assert.Regexp(t, regexp.MustCompile(`(?m)\s+5\s+/a$`), out)
+ assert.Regexp(t, regexp.MustCompile(`(?m)\s+10\s+/b$`), out)
+ assert.Regexp(t, regexp.MustCompile(`(?m)\s+15\s+/c$`), out)
+}
+
+func TestShowItemCountInNonInteractiveModeWithColorsAndFile(t *testing.T) {
+ tmpDir := t.TempDir()
+ filePath := filepath.Join(tmpDir, "single")
+ err := os.WriteFile(filePath, []byte("x"), 0o644)
+ assert.Nil(t, err)
+
+ output := bytes.NewBuffer(make([]byte, 10))
+ ui := CreateStdoutUI(output, true, false, false, false, false, false, false, "", 0, false, 0)
+ ui.SetShowItemCount()
+
+ err = ui.AnalyzePath(tmpDir, nil)
+ assert.Nil(t, err)
+
+ out := output.String()
+ assert.Regexp(t, regexp.MustCompile(`(?m)\s+1\s+single$`), out)
+}
+
+func TestShowSummary(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ buff := make([]byte, 10)
+ output := bytes.NewBuffer(buff)
+
+ ui := CreateStdoutUI(output, true, false, true, false, true, false, false, "", 0, false, 0)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+ err = ui.StartUILoop()
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "test_dir")
+}
+
+func TestShowSummaryBw(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ buff := make([]byte, 10)
+ output := bytes.NewBuffer(buff)
+
+ ui := CreateStdoutUI(output, false, false, false, false, true, false, false, "", 0, false, 0)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+ err = ui.StartUILoop()
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "test_dir")
+}
+
+func TestShowTop(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ buff := make([]byte, 10)
+ output := bytes.NewBuffer(buff)
+
+ ui := CreateStdoutUI(output, true, false, true, false, true, false, false, "", 2, false, 0)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+ err = ui.StartUILoop()
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "test_dir/nested/subnested/file")
+ assert.Contains(t, output.String(), "test_dir/nested/file2")
+}
+
+func TestShowTopBw(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ buff := make([]byte, 10)
+ output := bytes.NewBuffer(buff)
+
+ ui := CreateStdoutUI(output, false, false, false, false, true, false, false, "", 2, false, 0)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+ err = ui.StartUILoop()
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "test_dir/nested/subnested/file")
+ assert.Contains(t, output.String(), "test_dir/nested/file2")
+}
+
+func TestShowDepth(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ buff := make([]byte, 10)
+ output := bytes.NewBuffer(buff)
+
+ ui := CreateStdoutUI(output, false, false, false, false, false, false, false, "", 0, false, 2)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+ err = ui.StartUILoop()
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "test_dir")
+ assert.Contains(t, output.String(), "test_dir/nested")
+ assert.Contains(t, output.String(), "test_dir/nested/subnested")
+}
+
+func TestShowDepthWithColors(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ buff := make([]byte, 10)
+ output := bytes.NewBuffer(buff)
+
+ ui := CreateStdoutUI(output, true, false, false, false, false, false, false, "", 0, false, 2)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+ err = ui.StartUILoop()
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "test_dir")
+ assert.Contains(t, output.String(), "test_dir/nested")
+}
+
+func TestShowDepthWithReverseSort(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ buff := make([]byte, 10)
+ output := bytes.NewBuffer(buff)
+
+ ui := CreateStdoutUI(output, false, false, false, false, false, false, false, "", 0, true, 2)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+ err = ui.StartUILoop()
+
+ assert.Nil(t, err)
+ outputStr := output.String()
+ assert.Contains(t, outputStr, "test_dir")
+ assert.Contains(t, outputStr, "test_dir/nested")
+ assert.Contains(t, outputStr, "test_dir/nested/subnested")
+}
+
+func TestAnalyzeSubdir(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ buff := make([]byte, 10)
+ output := bytes.NewBuffer(buff)
+
+ ui := CreateStdoutUI(output, false, false, false, false, false, false, false, "", 0, false, 0)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.AnalyzePath("test_dir/nested", nil)
+ assert.Nil(t, err)
+ err = ui.StartUILoop()
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "file2")
+}
+
+func TestAnalyzePathWithColors(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ buff := make([]byte, 10)
+ output := bytes.NewBuffer(buff)
+
+ ui := CreateStdoutUI(output, true, false, true, false, false, false, false, "", 0, false, 0)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.AnalyzePath("test_dir/nested", nil)
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "subnested")
+}
+
+func TestAnalyzePathWoUnicode(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ buff := make([]byte, 10)
+ output := bytes.NewBuffer(buff)
+
+ ui := CreateStdoutUI(output, false, true, true, false, false, false, false, "", 0, false, 0)
+ ui.UseOldProgressRunes()
+ err := ui.AnalyzePath("test_dir/nested", nil)
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "subnested")
+}
+
+func TestItemRows(t *testing.T) {
+ output := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateStdoutUI(output, false, true, false, false, false, false, false, "", 0, false, 0)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ err := ui.AnalyzePath("test_dir", nil)
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "KiB")
+}
+
+func TestAnalyzePathWithProgress(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ output := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateStdoutUI(output, false, true, true, false, false, false, false, "", 0, false, 0)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.AnalyzePath("test_dir", nil)
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "nested")
+}
+
+func TestShowDevices(t *testing.T) {
+ output := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateStdoutUI(output, false, true, false, false, false, false, false, "", 0, false, 0)
+ err := ui.ListDevices(getDevicesInfoMock())
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "Device")
+ assert.Contains(t, output.String(), "xxx")
+}
+
+func TestShowDevicesWithColor(t *testing.T) {
+ output := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateStdoutUI(output, true, true, true, false, false, false, false, "", 0, false, 0)
+ err := ui.ListDevices(getDevicesInfoMock())
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "Device")
+ assert.Contains(t, output.String(), "xxx")
+}
+
+func TestReadAnalysisWithColor(t *testing.T) {
+ input, err := os.OpenFile("../internal/testdata/test.json", os.O_RDONLY, 0o644)
+ assert.Nil(t, err)
+
+ output := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateStdoutUI(output, true, true, true, false, false, false, false, "", 0, false, 0)
+ err = ui.ReadAnalysis(input)
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "main.go")
+}
+
+func TestReadAnalysisBw(t *testing.T) {
+ input, err := os.OpenFile("../internal/testdata/test.json", os.O_RDONLY, 0o644)
+ assert.Nil(t, err)
+
+ output := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateStdoutUI(output, false, false, false, false, false, false, false, "", 0, false, 0)
+ err = ui.ReadAnalysis(input)
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "main.go")
+}
+
+func TestReadAnalysisWithWrongFile(t *testing.T) {
+ input, err := os.OpenFile("../internal/testdata/wrong.json", os.O_RDONLY, 0o644)
+ assert.Nil(t, err)
+
+ output := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateStdoutUI(output, true, true, true, false, false, false, false, "", 0, false, 0)
+ err = ui.ReadAnalysis(input)
+
+ assert.NotNil(t, err)
+}
+
+func TestReadAnalysisWithSummarize(t *testing.T) {
+ input, err := os.OpenFile("../internal/testdata/test.json", os.O_RDONLY, 0o644)
+ assert.Nil(t, err)
+
+ output := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateStdoutUI(output, false, false, false, false, true, false, false, "", 0, false, 0)
+ err = ui.ReadAnalysis(input)
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), " gdu\n")
+}
+
+func TestMaxInt(t *testing.T) {
+ assert.Equal(t, 5, maxInt(2, 5))
+ assert.Equal(t, 4, maxInt(4, 2))
+}
+
+func TestFormatSize(t *testing.T) {
+ output := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateStdoutUI(output, true, true, true, false, false, false, false, "", 0, false, 0)
+
+ assert.Contains(t, ui.formatSize(1), "B")
+ assert.Contains(t, ui.formatSize(1<<10+1), "KiB")
+ assert.Contains(t, ui.formatSize(1<<20+1), "MiB")
+ assert.Contains(t, ui.formatSize(1<<30+1), "GiB")
+ assert.Contains(t, ui.formatSize(1<<40+1), "TiB")
+ assert.Contains(t, ui.formatSize(1<<50+1), "PiB")
+ assert.Contains(t, ui.formatSize(1<<60+1), "EiB")
+}
+
+func TestFormatSizeDec(t *testing.T) {
+ output := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateStdoutUI(output, true, true, true, false, false, true, false, "", 0, false, 0)
+
+ assert.Contains(t, ui.formatSize(1), "B")
+ assert.Contains(t, ui.formatSize(1<<10+1), "kB")
+ assert.Contains(t, ui.formatSize(1<<20+1), "MB")
+ assert.Contains(t, ui.formatSize(1<<30+1), "GB")
+ assert.Contains(t, ui.formatSize(1<<40+1), "TB")
+ assert.Contains(t, ui.formatSize(1<<50+1), "PB")
+ assert.Contains(t, ui.formatSize(1<<60+1), "EB")
+}
+
+func TestFormatCount(t *testing.T) {
+ output := bytes.NewBuffer(make([]byte, 10))
+ ui := CreateStdoutUI(output, true, true, true, false, false, true, false, "", 0, false, 0)
+
+ assert.Equal(t, "42", ui.formatCount(42))
+ assert.Equal(t, "1.5k", ui.formatCount(1500))
+ assert.Equal(t, "2.5M", ui.formatCount(2500000))
+ assert.Equal(t, "3.5G", ui.formatCount(3500000000))
+}
+
+func TestFormatSizeRaw(t *testing.T) {
+ output := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateStdoutUI(output, true, true, true, false, false, true, true, "", 0, false, 0)
+
+ assert.Equal(t, ui.formatSize(1), "1")
+ assert.Equal(t, ui.formatSize(1<<10+1), "1025")
+ assert.Equal(t, ui.formatSize(1<<20+1), "1048577")
+ assert.Equal(t, ui.formatSize(1<<30+1), "1073741825")
+ assert.Equal(t, ui.formatSize(1<<40+1), "1099511627777")
+ assert.Equal(t, ui.formatSize(1<<50+1), "1125899906842625")
+ assert.Equal(t, ui.formatSize(1<<60+1), "1152921504606846977")
+}
+func TestFormatSizeFixedUnitBinary(t *testing.T) {
+ output := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateStdoutUI(output, false, false, false, false, false, false, false, "k", 0, false, 0)
+ assert.Equal(t, "0.1 KiB", ui.formatSize(100))
+ assert.Equal(t, "1500.0 KiB", ui.formatSize(1536000))
+
+ ui = CreateStdoutUI(output, false, false, false, false, false, false, false, "m", 0, false, 0)
+ assert.Equal(t, "0.1 MiB", ui.formatSize(100*1024))
+ assert.Equal(t, "1500.0 MiB", ui.formatSize(1536000*1024))
+
+ ui = CreateStdoutUI(output, false, false, false, false, false, false, false, "g", 0, false, 0)
+ assert.Equal(t, "0.1 GiB", ui.formatSize(100*1024*1024))
+ assert.Equal(t, "1500.0 GiB", ui.formatSize(1536000*1024*1024))
+}
+func TestFormatSizeFixedUnitSI(t *testing.T) {
+ output := bytes.NewBuffer(make([]byte, 10))
+
+ // -k --si
+ ui := CreateStdoutUI(output, false, false, false, false, false, true, false, "k", 0, false, 0)
+ assert.Equal(t, "0.1 kB", ui.formatSize(100))
+ assert.Equal(t, "1500.0 kB", ui.formatSize(15e+5))
+
+ ui = CreateStdoutUI(output, false, false, false, false, false, true, false, "m", 0, false, 0)
+ assert.Equal(t, "0.1 MB", ui.formatSize(1e+5))
+ assert.Equal(t, "1500.0 MB", ui.formatSize(1.5e+9))
+
+ ui = CreateStdoutUI(output, false, false, false, false, false, true, false, "g", 0, false, 0)
+ assert.Equal(t, "0.1 GB", ui.formatSize(1e+8))
+ assert.Equal(t, "1500.0 GB", ui.formatSize(1.5e+12))
+}
+
+// func printBuffer(buff *bytes.Buffer) {
+// for i, x := range buff.String() {
+// println(i, string(x))
+// }
+// }
+
+func getDevicesInfoMock() device.DevicesInfoGetter {
+ item := &device.Device{
+ Name: "xxx",
+ }
+
+ mock := testdev.DevicesInfoGetterMock{}
+ mock.Devices = []*device.Device{item}
+ return mock
+}
+
+// New tests for reverse sort functionality
+func TestAnalyzePathWithReverseSort(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ buff := make([]byte, 10)
+ output := bytes.NewBuffer(buff)
+
+ ui := CreateStdoutUI(output, false, false, false, false, false, false, false, "", 0, true, 0)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+ err = ui.StartUILoop()
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "nested")
+
+ // Verify that smaller items appear first when reverse sort is enabled
+ outputStr := output.String()
+ lines := strings.Split(outputStr, "\n")
+
+ // Filter out empty lines and progress lines
+ var fileLines []string
+ for _, line := range lines {
+ if strings.Contains(line, " ") && !strings.Contains(line, "Scanning") {
+ fileLines = append(fileLines, line)
+ }
+ }
+
+ // With reverse sort, smaller files should appear before larger ones
+ assert.True(t, len(fileLines) > 0, "Should have file entries in output")
+}
+
+func TestAnalyzePathWithoutReverseSort(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ buff := make([]byte, 10)
+ output := bytes.NewBuffer(buff)
+
+ ui := CreateStdoutUI(output, false, false, false, false, false, false, false, "", 0, false, 0)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+ err = ui.StartUILoop()
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "nested")
+}
+
+func TestReverseSortWithColors(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ buff := make([]byte, 10)
+ output := bytes.NewBuffer(buff)
+
+ ui := CreateStdoutUI(output, true, false, true, false, false, false, false, "", 0, true, 0)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.AnalyzePath("test_dir/nested", nil)
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "subnested")
+}
+
+func TestReverseSortWithSummarize(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ buff := make([]byte, 10)
+ output := bytes.NewBuffer(buff)
+
+ ui := CreateStdoutUI(output, false, false, false, false, true, false, false, "", 0, true, 0)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+ err = ui.StartUILoop()
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "test_dir")
+}
+
+func TestReverseSortWithTop(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ buff := make([]byte, 10)
+ output := bytes.NewBuffer(buff)
+
+ ui := CreateStdoutUI(output, true, false, true, false, true, false, false, "", 2, true, 0)
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+ err = ui.StartUILoop()
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "test_dir/nested/subnested/file")
+ assert.Contains(t, output.String(), "test_dir/nested/file2")
+}
+
+func TestReverseSortFromAnalysisFile(t *testing.T) {
+ input, err := os.OpenFile("../internal/testdata/test.json", os.O_RDONLY, 0o644)
+ assert.Nil(t, err)
+
+ output := bytes.NewBuffer(make([]byte, 10))
+
+ ui := CreateStdoutUI(output, true, true, true, false, false, false, false, "", 0, true, 0)
+ err = ui.ReadAnalysis(input)
+
+ assert.Nil(t, err)
+ assert.Contains(t, output.String(), "main.go")
+}
--- /dev/null
+package tui
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "runtime"
+ "runtime/debug"
+ "strconv"
+ "strings"
+ "time"
+
+ "golang.org/x/text/cases"
+ "golang.org/x/text/language"
+
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+
+ "github.com/dundee/gdu/v5/build"
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/dundee/gdu/v5/pkg/device"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/dundee/gdu/v5/report"
+)
+
+const (
+ defaultLinesCount = 500
+ linesThreshold = 20
+
+ actionEmpty = "empty"
+ actionDelete = "delete"
+
+ actingEmpty = "emptying"
+ actingDelete = "deleting"
+)
+
+// ListDevices lists mounted devices and shows their disk usage
+func (ui *UI) ListDevices(getter device.DevicesInfoGetter) error {
+ var err error
+ ui.getter = getter
+ ui.devices, err = getter.GetDevicesInfo()
+ if err != nil {
+ return err
+ }
+
+ ui.showDevices()
+
+ return nil
+}
+
+// AnalyzePath analyzes recursively disk usage for given path
+func (ui *UI) AnalyzePath(path string, parentDir fs.Item) error {
+ ui.progress = tview.NewTextView().SetText("Scanning...")
+ ui.progress.SetBorder(true).SetBorderPadding(2, 2, 2, 2)
+ ui.progress.SetTitle(" Scanning... ")
+ ui.progress.SetDynamicColors(true)
+
+ flex := tview.NewFlex().
+ AddItem(nil, 0, 1, false).
+ AddItem(tview.NewFlex().SetDirection(tview.FlexRow).
+ AddItem(nil, 0, 1, false).
+ AddItem(ui.progress, 8, 1, false).
+ AddItem(nil, 0, 1, false), 0, 50, false).
+ AddItem(nil, 0, 1, false)
+
+ ui.pages.AddPage("progress", flex, true, true)
+
+ go ui.updateProgress()
+
+ go func() {
+ defer debug.FreeOSMemory()
+ currentDir := ui.Analyzer.AnalyzeDir(path, ui.CreateIgnoreFunc(), ui.CreateFileTypeFilter())
+
+ if parentDir != nil {
+ currentDir.SetParent(parentDir)
+ // Remove old entry with the same name and add new one
+ parentDir.RemoveFileByName(currentDir.GetName())
+ parentDir.AddFile(currentDir)
+ } else {
+ ui.topDirPath = path
+ ui.topDir = currentDir
+ }
+
+ ui.topDir.UpdateStats(ui.linkedItems)
+
+ ui.app.QueueUpdateDraw(func() {
+ ui.currentDir = currentDir
+ ui.showDir()
+ ui.pages.RemovePage("progress")
+ })
+
+ if ui.done != nil {
+ ui.done <- struct{}{}
+ }
+ }()
+
+ return nil
+}
+
+// ReadAnalysis reads analysis report from JSON file
+func (ui *UI) ReadAnalysis(input io.Reader) error {
+ ui.progress = tview.NewTextView().SetText("Reading analysis from file...")
+ ui.progress.SetBorder(true).SetBorderPadding(2, 2, 2, 2)
+ ui.progress.SetTitle(" Reading... ")
+ ui.progress.SetDynamicColors(true)
+
+ flex := tview.NewFlex().
+ AddItem(nil, 0, 1, false).
+ AddItem(tview.NewFlex().SetDirection(tview.FlexRow).
+ AddItem(nil, 10, 1, false).
+ AddItem(ui.progress, 8, 1, false).
+ AddItem(nil, 10, 1, false), 0, 50, false).
+ AddItem(nil, 0, 1, false)
+
+ ui.pages.AddPage("progress", flex, true, true)
+
+ go func() {
+ var err error
+ ui.currentDir, err = report.ReadAnalysis(input)
+ if err != nil {
+ ui.app.QueueUpdateDraw(func() {
+ ui.pages.RemovePage("progress")
+ ui.showErr("Error reading file", err)
+ })
+ if ui.done != nil {
+ ui.done <- struct{}{}
+ }
+ return
+ }
+ runtime.GC()
+
+ ui.topDirPath = ui.currentDir.GetPath()
+ ui.topDir = ui.currentDir
+
+ links := make(fs.HardLinkedItems, 10)
+ ui.topDir.UpdateStats(links)
+
+ ui.app.QueueUpdateDraw(func() {
+ ui.showDir()
+ ui.pages.RemovePage("progress")
+ })
+
+ if ui.done != nil {
+ ui.done <- struct{}{}
+ }
+ }()
+
+ return nil
+}
+
+// ReadFromStorage reads analysis data from persistent key-value storage
+func (ui *UI) ReadFromStorage(storagePath, path string) error {
+ storage := analyze.NewStorage(storagePath, path)
+ closeFn := storage.Open()
+ defer closeFn()
+
+ dir, err := storage.GetDirForPath(path)
+ if err != nil {
+ return err
+ }
+
+ ui.currentDir = dir
+ ui.topDirPath = ui.currentDir.GetPath()
+ ui.topDir = ui.currentDir
+
+ ui.showDir()
+ return nil
+}
+
+func (ui *UI) delete(shouldEmpty bool) {
+ if len(ui.markedRows) > 0 {
+ ui.deleteMarked(shouldEmpty)
+ } else {
+ ui.deleteSelected(shouldEmpty)
+ }
+}
+
+func (ui *UI) deleteSelected(shouldEmpty bool) {
+ row, column := ui.table.GetSelection()
+ selectedItem := ui.table.GetCell(row, column).GetReference().(fs.Item)
+
+ if ui.deleteInBackground {
+ ui.queueForDeletion([]fs.Item{selectedItem}, shouldEmpty)
+ return
+ }
+
+ var action, acting string
+ if shouldEmpty {
+ action = actionEmpty
+ acting = actingEmpty
+ } else {
+ action = actionDelete
+ acting = actingDelete
+ }
+ modal := tview.NewModal().SetText(
+ cases.Title(language.English).String(acting) +
+ " " +
+ tview.Escape(selectedItem.GetName()) +
+ "...",
+ )
+ ui.pages.AddPage(acting, modal, true, true)
+
+ var currentDir fs.Item
+ var deleteItems []fs.Item
+ if shouldEmpty && selectedItem.IsDir() {
+ currentDir = selectedItem
+ for file := range currentDir.GetFiles(fs.SortBySize, fs.SortDesc) {
+ deleteItems = append(deleteItems, file)
+ }
+ } else {
+ currentDir = ui.currentDir
+ deleteItems = append(deleteItems, selectedItem)
+ }
+
+ var deleteFun func(fs.Item, fs.Item) error
+ if shouldEmpty && !selectedItem.IsDir() {
+ deleteFun = ui.emptier
+ } else {
+ deleteFun = ui.remover
+ }
+ go func() {
+ for _, item := range deleteItems {
+ if err := deleteFun(currentDir, item); err != nil {
+ msg := "Can't " + action + " " + tview.Escape(selectedItem.GetName())
+ ui.app.QueueUpdateDraw(func() {
+ ui.pages.RemovePage(acting)
+ ui.showErr(msg, err)
+ })
+ if ui.done != nil {
+ ui.done <- struct{}{}
+ }
+ return
+ }
+ }
+
+ ui.app.QueueUpdateDraw(func() {
+ ui.pages.RemovePage(acting)
+ x, y := ui.table.GetOffset()
+ ui.showDir()
+ ui.table.Select(min(row, ui.table.GetRowCount()-1), 0)
+ ui.table.SetOffset(min(x, ui.table.GetRowCount()-1), y)
+ })
+
+ if ui.done != nil {
+ ui.done <- struct{}{}
+ }
+ }()
+}
+
+func (ui *UI) showInfo() {
+ if ui.currentDir == nil {
+ return
+ }
+
+ var content, numberColor string
+ row, column := ui.table.GetSelection()
+ selectedFile := ui.table.GetCell(row, column).GetReference().(fs.Item)
+
+ if ui.UseColors {
+ numberColor = fmt.Sprintf(
+ "[%s::b]",
+ ui.resultRow.NumberColor,
+ )
+ } else {
+ numberColor = defaultColorBold
+ }
+
+ linesCount := 12
+
+ text := tview.NewTextView().SetDynamicColors(true)
+ text.SetBorder(true).SetBorderPadding(2, 2, 2, 2)
+ text.SetBorderColor(tcell.ColorDefault)
+ text.SetTitle(" Item info ")
+
+ content += "[::b]Name:[::-] "
+ content += tview.Escape(selectedFile.GetName()) + "\n"
+ content += "[::b]Path:[::-] "
+ content += tview.Escape(
+ strings.TrimPrefix(selectedFile.GetPath(), build.RootPathPrefix),
+ ) + "\n"
+ content += "[::b]Type:[::-] " + selectedFile.GetType() + "\n\n"
+
+ content += " [::b]Disk usage:[::-] "
+ content += numberColor + ui.formatSize(selectedFile.GetUsage(), false, true)
+ content += fmt.Sprintf(" (%s%d[-::] B)", numberColor, selectedFile.GetUsage()) + "\n"
+ content += "[::b]Apparent size:[::-] "
+ content += numberColor + ui.formatSize(selectedFile.GetSize(), false, true)
+ content += fmt.Sprintf(" (%s%d[-::] B)", numberColor, selectedFile.GetSize()) + "\n"
+
+ if selectedFile.GetMultiLinkedInode() > 0 {
+ linkedItems := ui.linkedItems[selectedFile.GetMultiLinkedInode()]
+ linesCount += 2 + len(linkedItems)
+ content += "\nHard-linked files:\n"
+ for _, linkedItem := range linkedItems {
+ content += "\t" + linkedItem.GetPath() + "\n"
+ }
+ }
+
+ text.SetText(content)
+
+ flex := tview.NewFlex().
+ AddItem(nil, 0, 1, false).
+ AddItem(tview.NewFlex().SetDirection(tview.FlexRow).
+ AddItem(nil, 0, 1, false).
+ AddItem(text, linesCount, 1, false).
+ AddItem(nil, 0, 1, false), 80, 1, false).
+ AddItem(nil, 0, 1, false)
+
+ ui.pages.AddPage("info", flex, true, true)
+}
+
+func (ui *UI) openItem() {
+ row, column := ui.table.GetSelection()
+ selectedFile, ok := ui.table.GetCell(row, column).GetReference().(fs.Item)
+ if !ok || selectedFile == ui.currentDir.GetParent() {
+ return
+ }
+
+ openBinary := "xdg-open"
+
+ switch runtime.GOOS {
+ case "darwin":
+ openBinary = "open"
+ case "windows":
+ openBinary = "explorer"
+ }
+
+ cmd := exec.Command(openBinary, selectedFile.GetPath())
+ err := cmd.Start()
+ if err != nil {
+ ui.showErr("Error opening", err)
+ }
+}
+
+func (ui *UI) confirmExport() *tview.Form {
+ form := tview.NewForm().
+ AddInputField("File name", "export.json", 30, nil, func(v string) {
+ ui.exportName = v
+ }).
+ AddButton("Export", ui.exportAnalysis).
+ SetButtonsAlign(tview.AlignCenter)
+ form.SetBorder(true).
+ SetTitle(" Export data to JSON ").
+ SetInputCapture(func(key *tcell.EventKey) *tcell.EventKey {
+ if key.Key() == tcell.KeyEsc {
+ ui.pages.RemovePage("export")
+ ui.app.SetFocus(ui.table)
+ return nil
+ }
+ return key
+ })
+ flex := modal(form, 50, 7)
+ ui.pages.AddPage("export", flex, true, true)
+ ui.app.SetFocus(form)
+ return form
+}
+
+func (ui *UI) exportAnalysis() {
+ ui.pages.RemovePage("export")
+
+ text := tview.NewTextView().SetText("Export in progress...").SetTextAlign(tview.AlignCenter)
+ text.SetBorder(true).SetTitle(" Export data to JSON ")
+ flex := modal(text, 50, 3)
+ ui.pages.AddPage("exporting", flex, true, true)
+
+ go func() {
+ var err error
+ defer ui.app.QueueUpdateDraw(func() {
+ ui.pages.RemovePage("exporting")
+ if err == nil {
+ ui.app.SetFocus(ui.table)
+ }
+ })
+ if ui.done != nil {
+ defer func() {
+ ui.done <- struct{}{}
+ }()
+ }
+
+ var buff bytes.Buffer
+
+ buff.Write([]byte(`[1,2,{"progname":"gdu","progver":"`))
+ buff.Write([]byte(build.Version))
+ buff.Write([]byte(`","timestamp":`))
+ buff.Write([]byte(strconv.FormatInt(time.Now().Unix(), 10)))
+ buff.Write([]byte("},\n"))
+
+ file, err := os.Create(ui.exportName)
+ if err != nil {
+ ui.showErrFromGo("Error creating file", err)
+ return
+ }
+
+ if err = ui.topDir.EncodeJSON(&buff, true); err != nil {
+ ui.showErrFromGo("Error encoding JSON", err)
+ return
+ }
+
+ if _, err = buff.Write([]byte("]\n")); err != nil {
+ ui.showErrFromGo("Error writing to buffer", err)
+ return
+ }
+ if _, err = buff.WriteTo(file); err != nil {
+ ui.showErrFromGo("Error writing to file", err)
+ return
+ }
+ }()
+}
+
+func (ui *UI) isInArchive() bool {
+ if ui.currentDir == nil {
+ return false
+ }
+ _, ok := ui.currentDir.(*analyze.ZipDir)
+ return ok
+}
--- /dev/null
+//go:build linux
+
+package tui
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/dundee/gdu/v5/internal/testapp"
+ "github.com/dundee/gdu/v5/pkg/device"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestShowDevicesWithError(t *testing.T) {
+ app, simScreen := testapp.CreateTestAppWithSimScreen(50, 50)
+ defer simScreen.Fini()
+
+ getter := device.LinuxDevicesInfoGetter{MountsPath: "/xyzxyz"}
+
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, false, false, false)
+ err := ui.ListDevices(getter)
+
+ assert.Contains(t, err.Error(), "no such file")
+}
--- /dev/null
+package tui
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "slices"
+ "testing"
+
+ "github.com/dundee/gdu/v5/internal/testanalyze"
+ "github.com/dundee/gdu/v5/internal/testapp"
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/gdamore/tcell/v2"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestShowDevices(t *testing.T) {
+ app, simScreen := testapp.CreateTestAppWithSimScreen(50, 50)
+ defer simScreen.Fini()
+
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ err := ui.ListDevices(getDevicesInfoMock())
+
+ assert.Nil(t, err)
+
+ ui.table.Draw(simScreen)
+ simScreen.Show()
+
+ b, _, _ := simScreen.GetContents()
+
+ text := []byte("Device name")
+ for i, r := range b[0:11] {
+ assert.Equal(t, text[i], r.Bytes[0])
+ }
+}
+
+func TestShowDevicesBW(t *testing.T) {
+ app, simScreen := testapp.CreateTestAppWithSimScreen(50, 50)
+ defer simScreen.Fini()
+
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, false, false, false)
+ err := ui.ListDevices(getDevicesInfoMock())
+
+ assert.Nil(t, err)
+
+ ui.table.Draw(simScreen)
+ simScreen.Show()
+
+ b, _, _ := simScreen.GetContents()
+
+ text := []byte("Device name")
+ for i, r := range b[0:11] {
+ assert.Equal(t, text[i], r.Bytes[0])
+ }
+}
+
+func TestDeviceSelected(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, true, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ ui.UseOldSizeBar()
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+ err := ui.ListDevices(getDevicesInfoMock())
+
+ assert.Nil(t, err)
+ assert.Equal(t, 3, ui.table.GetRowCount())
+
+ ui.deviceItemSelected(1, 0)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ assert.Equal(t, 4, ui.table.GetRowCount())
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "ccc")
+ assert.Contains(t, ui.table.GetCell(1, 0).Text, "bbb")
+}
+
+func TestNilDeviceSelected(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, true, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ ui.UseOldSizeBar()
+ ui.SetIgnoreDirPaths([]string{"/xxx"})
+
+ ui.deviceItemSelected(1, 0)
+
+ assert.Equal(t, 0, ui.table.GetRowCount())
+}
+
+func TestAnalyzePath(t *testing.T) {
+ ui := getAnalyzedPathMockedApp(t, true, true, true)
+
+ assert.Equal(t, 4, ui.table.GetRowCount())
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "ccc")
+}
+
+func TestAnalyzePathBW(t *testing.T) {
+ ui := getAnalyzedPathMockedApp(t, false, true, true)
+
+ assert.Equal(t, 4, ui.table.GetRowCount())
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "ccc")
+}
+
+func TestAnalyzePathWithParentDir(t *testing.T) {
+ parentDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "parent",
+ },
+ Files: make([]fs.Item, 0, 1),
+ }
+
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, true, true)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.topDir = parentDir
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", parentDir)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+ assert.Equal(t, parentDir, ui.currentDir.GetParent())
+
+ assert.Equal(t, 5, ui.table.GetRowCount())
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "/..")
+ assert.Contains(t, ui.table.GetCell(1, 0).Text, "ccc")
+}
+
+func TestReadAnalysis(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ input, err := os.OpenFile("../internal/testdata/test.json", os.O_RDONLY, 0o644)
+ assert.Nil(t, err)
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, true, false)
+ ui.done = make(chan struct{})
+
+ err = ui.ReadAnalysis(input)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for reading
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "gdu", ui.currentDir.GetName())
+}
+
+func TestReadAnalysisWithWrongFile(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ input, err := os.OpenFile("../internal/testdata/wrong.json", os.O_RDONLY, 0o644)
+ assert.Nil(t, err)
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.done = make(chan struct{})
+
+ err = ui.ReadAnalysis(input)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for reading
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.True(t, ui.pages.HasPage("error"))
+}
+
+func TestViewDirContents(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ res := ui.showFile() // selected item is dir, do nothing
+ assert.Nil(t, res)
+}
+
+func TestViewFileWithoutCurrentDir(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+
+ res := ui.showFile() // no current directory
+ assert.Nil(t, res)
+}
+
+func TestViewContentsOfNotExistingFile(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ ui.table.Select(3, 0)
+
+ selectedFile := ui.table.GetCell(3, 0).GetReference().(fs.Item)
+ assert.Equal(t, "ddd", selectedFile.GetName())
+
+ res := ui.showFile()
+ assert.Nil(t, res)
+}
+
+func TestViewFile(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ ui.table.Select(0, 0)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+ ui.table.Select(2, 0)
+
+ file := ui.showFile()
+ assert.True(t, ui.pages.HasPage("file"))
+
+ event := file.GetInputCapture()(tcell.NewEventKey(tcell.KeyRune, 'j', 0))
+ assert.Equal(t, 'j', event.Rune())
+}
+
+func TestChangeCwd(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+ cwd := ""
+
+ opt := func(ui *UI) {
+ ui.SetChangeCwdFn(func(p string) error {
+ cwd = p
+ return nil
+ })
+ }
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false, opt)
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ ui.table.Select(0, 0)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+ ui.table.Select(1, 0)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+
+ assert.Equal(t, cwd, "test_dir/nested/subnested")
+}
+
+func TestChangeCwdWithErr(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+ cwd := ""
+
+ opt := func(ui *UI) {
+ ui.SetChangeCwdFn(func(p string) error {
+ cwd = p
+ return errors.New("failed")
+ })
+ }
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false, opt)
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ ui.table.Select(0, 0)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+ ui.table.Select(1, 0)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+
+ assert.Equal(t, cwd, "test_dir/nested/subnested")
+}
+
+func TestShowInfo(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'i', 0))
+
+ assert.True(t, ui.pages.HasPage("info"))
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'q', 0))
+
+ assert.False(t, ui.pages.HasPage("info"))
+}
+
+func TestShowInfoBW(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, false, false, false)
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'i', 0))
+
+ assert.True(t, ui.pages.HasPage("info"))
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'i', 0))
+
+ assert.False(t, ui.pages.HasPage("info"))
+}
+
+func TestShowInfoWithHardlinks(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ files := slices.Collect(ui.currentDir.GetFiles(fs.SortByName, fs.SortAsc))
+ nested := files[0].(*analyze.Dir)
+ subnested := nested.Files[1].(*analyze.Dir)
+ file := subnested.Files[0].(*analyze.File)
+ file2 := nested.Files[0].(*analyze.File)
+ file.Mli = 1
+ file2.Mli = 1
+
+ ui.currentDir.UpdateStats(ui.linkedItems)
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+ ui.table.Select(1, 0)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+ ui.table.Select(1, 0)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'i', 0))
+
+ assert.True(t, ui.pages.HasPage("info"))
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'q', 0))
+
+ assert.False(t, ui.pages.HasPage("info"))
+}
+
+func TestShowInfoWithoutCurrentDir(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+
+ // pressing `i` will do nothing
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'i', 0))
+ assert.False(t, ui.pages.HasPage("info"))
+}
+
+func TestExitViewFile(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ ui.table.Select(0, 0)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+ ui.table.Select(2, 0)
+
+ file := ui.showFile()
+
+ assert.True(t, ui.pages.HasPage("file"))
+
+ file.GetInputCapture()(tcell.NewEventKey(tcell.KeyRune, 'q', 0))
+
+ assert.False(t, ui.pages.HasPage("file"))
+}
--- /dev/null
+package tui
+
+import (
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/rivo/tview"
+)
+
+func (ui *UI) queueForDeletion(items []fs.Item, shouldEmpty bool) {
+ go func() {
+ for _, item := range items {
+ ui.deleteQueue <- deleteQueueItem{item: item, shouldEmpty: shouldEmpty}
+ }
+ }()
+
+ ui.markedRows = make(map[int]struct{})
+}
+
+func (ui *UI) deleteWorker() {
+ defer func() {
+ if r := recover(); r != nil {
+ ui.app.Stop()
+ panic(r)
+ }
+ }()
+
+ for item := range ui.deleteQueue {
+ ui.deleteItem(item.item, item.shouldEmpty)
+ }
+}
+
+func (ui *UI) deleteItem(item fs.Item, shouldEmpty bool) {
+ ui.increaseActiveWorkers()
+ defer ui.decreaseActiveWorkers()
+
+ var action, acting string
+ if shouldEmpty {
+ action = actionEmpty
+ } else {
+ action = actionDelete
+ }
+
+ var deleteFun func(fs.Item, fs.Item) error
+ if shouldEmpty && !item.IsDir() {
+ deleteFun = ui.emptier
+ } else {
+ deleteFun = ui.remover
+ }
+
+ var parentDir fs.Item
+ var deleteItems []fs.Item
+ if shouldEmpty && item.IsDir() {
+ parentDir = item
+ for file := range item.GetFilesLocked(fs.SortBySize, fs.SortDesc) {
+ deleteItems = append(deleteItems, file)
+ }
+ } else {
+ parentDir = ui.currentDir
+ deleteItems = append(deleteItems, item)
+ }
+
+ for _, toDelete := range deleteItems {
+ if err := deleteFun(parentDir, toDelete); err != nil {
+ msg := "Can't " + action + " " + tview.Escape(toDelete.GetName())
+ ui.app.QueueUpdateDraw(func() {
+ ui.pages.RemovePage(acting)
+ ui.showErr(msg, err)
+ })
+ if ui.done != nil {
+ ui.done <- struct{}{}
+ }
+ return
+ }
+ }
+
+ if item.GetParent().GetPath() == ui.currentDir.GetPath() {
+ ui.app.QueueUpdateDraw(func() {
+ row, _ := ui.table.GetSelection()
+ x, y := ui.table.GetOffset()
+ ui.showDir()
+ ui.table.Select(min(row, ui.table.GetRowCount()-1), 0)
+ ui.table.SetOffset(min(x, ui.table.GetRowCount()-1), y)
+ })
+ }
+ if ui.done != nil {
+ ui.done <- struct{}{}
+ }
+}
+
+func (ui *UI) increaseActiveWorkers() {
+ ui.workersMut.Lock()
+ defer ui.workersMut.Unlock()
+ ui.activeWorkers++
+}
+
+func (ui *UI) decreaseActiveWorkers() {
+ ui.workersMut.Lock()
+ defer ui.workersMut.Unlock()
+ ui.activeWorkers--
+}
--- /dev/null
+package tui
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestCollapsedPathStruct(t *testing.T) {
+ // Test CollapsedPath struct creation and fields
+ cp := &CollapsedPath{
+ DisplayName: "test/path",
+ DeepestDir: nil,
+ Segments: []string{"test", "path"},
+ }
+
+ assert.Equal(t, "test/path", cp.DisplayName)
+ assert.Nil(t, cp.DeepestDir)
+ assert.Equal(t, []string{"test", "path"}, cp.Segments)
+}
+
+func TestFindCollapsedParentNilCases(t *testing.T) {
+ // Test nil input
+ result := findCollapsedParent(nil)
+ assert.Nil(t, result)
+}
+
+// Test that our new functions exist and don't panic with basic inputs
+func TestFunctionExistence(t *testing.T) {
+ // Test that findCollapsiblePath exists and handles nil gracefully
+ result := findCollapsiblePath(nil)
+ assert.Nil(t, result)
+}
--- /dev/null
+package tui
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/dundee/gdu/v5/internal/testapp"
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestFindCollapsiblePath(t *testing.T) {
+ // Test case 1: Non-directory item should return nil
+ file := &analyze.File{
+ Name: "test.txt",
+ }
+ result := findCollapsiblePath(file)
+ assert.Nil(t, result)
+
+ // Test case 2: Directory with files and subdirectories should not collapse
+ dirWithFiles := &analyze.Dir{
+ File: &analyze.File{
+ Name: "mixed",
+ },
+ Files: []fs.Item{
+ &analyze.Dir{
+ File: &analyze.File{
+ Name: "subdir",
+ },
+ Files: []fs.Item{},
+ },
+ &analyze.File{
+ Name: "file.txt",
+ },
+ },
+ }
+ result = findCollapsiblePath(dirWithFiles)
+ assert.Nil(t, result)
+
+ // Test case 3: Directory with multiple subdirectories should not collapse
+ dirWithMultiSubs := &analyze.Dir{
+ File: &analyze.File{
+ Name: "multi",
+ },
+ Files: []fs.Item{
+ &analyze.Dir{
+ File: &analyze.File{
+ Name: "subdir1",
+ },
+ Files: []fs.Item{},
+ },
+ &analyze.Dir{
+ File: &analyze.File{
+ Name: "subdir2",
+ },
+ Files: []fs.Item{},
+ },
+ },
+ }
+ result = findCollapsiblePath(dirWithMultiSubs)
+ assert.Nil(t, result)
+
+ // Test case 4: Single subdirectory chain should collapse
+ deepestDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "deep",
+ },
+ Files: []fs.Item{
+ &analyze.File{
+ Name: "finalfile.txt",
+ },
+ },
+ }
+
+ middleDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "middle",
+ },
+ Files: []fs.Item{deepestDir},
+ }
+
+ rootDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "root",
+ },
+ Files: []fs.Item{middleDir},
+ }
+
+ result = findCollapsiblePath(rootDir)
+ assert.NotNil(t, result)
+ assert.Equal(t, "root/middle/deep", result.DisplayName)
+ assert.Equal(t, deepestDir, result.DeepestDir)
+ assert.Equal(t, []string{"middle", "deep"}, result.Segments)
+
+ // Test case 5: Directory with no subdirectories should not collapse
+ emptyDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "empty",
+ },
+ Files: []fs.Item{},
+ }
+ result = findCollapsiblePath(emptyDir)
+ assert.Nil(t, result)
+}
+
+func TestFindCollapsedParent(t *testing.T) {
+ // Test case 1: Nil current directory
+ result := findCollapsedParent(nil)
+ assert.Nil(t, result)
+
+ // Test case 2: Directory without parent
+ rootDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "root",
+ },
+ Files: []fs.Item{},
+ }
+ result = findCollapsedParent(rootDir)
+ assert.Nil(t, result)
+
+ // Test case 3: Directory in a collapsed chain
+ otherDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "other",
+ },
+ Files: []fs.Item{},
+ }
+
+ grandParent := &analyze.Dir{
+ File: &analyze.File{
+ Name: "grandparent",
+ },
+ Files: []fs.Item{otherDir},
+ }
+ otherDir.SetParent(grandParent)
+
+ parent := &analyze.Dir{
+ File: &analyze.File{
+ Name: "parent",
+ },
+ Files: []fs.Item{},
+ }
+ parent.SetParent(grandParent)
+ grandParent.AddFile(parent)
+
+ child := &analyze.Dir{
+ File: &analyze.File{
+ Name: "child",
+ },
+ Files: []fs.Item{},
+ }
+ child.SetParent(parent)
+ parent.AddFile(child)
+
+ result = findCollapsedParent(child)
+ assert.Equal(t, grandParent, result)
+
+ // Test case 4: Directory not in a collapsed chain
+ normalParent := &analyze.Dir{
+ File: &analyze.File{
+ Name: "normalparent",
+ },
+ Files: []fs.Item{
+ &analyze.File{
+ Name: "file.txt",
+ },
+ },
+ }
+
+ normalChild := &analyze.Dir{
+ File: &analyze.File{
+ Name: "normalchild",
+ },
+ Files: []fs.Item{},
+ }
+ normalChild.SetParent(normalParent)
+ normalParent.AddFile(normalChild)
+
+ result = findCollapsedParent(normalChild)
+ assert.Equal(t, normalParent, result)
+}
+
+func TestFormatCollapsedRow(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, false, false, false)
+
+ // Create a test collapsed path
+ deepDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "deep",
+ Size: 1000,
+ Usage: 800,
+ },
+ Files: []fs.Item{},
+ }
+
+ collapsedPath := &CollapsedPath{
+ DisplayName: "level1/level2/deep",
+ DeepestDir: deepDir,
+ Segments: []string{"level1", "level2", "deep"},
+ }
+
+ // Test normal formatting
+ result := ui.formatCollapsedRow(collapsedPath, 1000, 1000, false, false)
+ assert.Contains(t, result, "level1/level2/deep")
+ assert.Contains(t, result, "/") // Should have directory indicator
+
+ // Test with marked flag
+ ui.markedRows = map[int]struct{}{0: {}}
+ result = ui.formatCollapsedRow(collapsedPath, 1000, 1000, true, false)
+ assert.Contains(t, result, "✓") // Should have marked indicator
+
+ // Test with ignored flag
+ result = ui.formatCollapsedRow(collapsedPath, 1000, 1000, false, true)
+ assert.Contains(t, result, "level1/level2/deep")
+
+ // Test with ShowApparentSize
+ ui.ShowApparentSize = true
+ result = ui.formatCollapsedRow(collapsedPath, 1000, 1000, false, false)
+ assert.Contains(t, result, "level1/level2/deep")
+
+ // Test with showItemCount
+ ui.showItemCount = true
+ result = ui.formatCollapsedRow(collapsedPath, 1000, 1000, false, false)
+ assert.Contains(t, result, "level1/level2/deep")
+
+ // Test with showMtime
+ ui.showMtime = true
+ result = ui.formatCollapsedRow(collapsedPath, 1000, 1000, false, false)
+ assert.Contains(t, result, "level1/level2/deep")
+
+ // Test without colors
+ ui2 := CreateUI(app, simScreen, &bytes.Buffer{}, false, false, false, false)
+ result = ui2.formatCollapsedRow(collapsedPath, 1000, 1000, false, false)
+ assert.Contains(t, result, "level1/level2/deep")
+}
+
+func TestCollapsedPathIntegration(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, false, false, false)
+
+ // Create a directory structure that should be collapsed
+ deepestDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "deepest",
+ Size: 100,
+ Usage: 80,
+ },
+ Files: []fs.Item{
+ &analyze.File{
+ Name: "file.txt",
+ Size: 50,
+ Usage: 40,
+ },
+ },
+ }
+
+ middleDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "middle",
+ Size: 100,
+ Usage: 80,
+ },
+ Files: []fs.Item{deepestDir},
+ }
+
+ topDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "top",
+ Size: 100,
+ Usage: 80,
+ },
+ Files: []fs.Item{middleDir},
+ }
+
+ deepestDir.SetParent(middleDir)
+ middleDir.SetParent(topDir)
+
+ ui.currentDir = topDir
+ ui.topDir = topDir
+ ui.topDirPath = "/test"
+ ui.currentDirPath = "/test"
+ ui.SetCollapsePath(true)
+
+ // Test that showDir properly handles collapsed paths
+ ui.showDir()
+
+ // Test navigation into collapsed path
+ ui.table.Select(1, 0) // Select the collapsed entry
+ cell := ui.table.GetCell(1, 0)
+ assert.NotNil(t, cell)
+
+ ref := cell.GetReference()
+ assert.NotNil(t, ref)
+ assert.Equal(t, deepestDir, ref) // Should reference the deepest directory
+}
--- /dev/null
+package tui
+
+import (
+ "os"
+ "os/exec"
+)
+
+// Execute runs given bin path via exec.Command call
+func Execute(argv0 string, argv, envv []string) error {
+ cmd := exec.Command(argv0, argv...)
+
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ cmd.Stdin = os.Stdin
+ cmd.Env = envv
+
+ return cmd.Run()
+}
--- /dev/null
+//go:build !windows
+
+package tui
+
+import (
+ "os"
+ "os/signal"
+ "syscall"
+)
+
+func getShellBin() string {
+ shellbin, ok := os.LookupEnv("SHELL")
+ if !ok {
+ shellbin = "/bin/bash"
+ }
+ return shellbin
+}
+
+func (ui *UI) spawnShell() {
+ if ui.currentDir == nil {
+ return
+ }
+
+ ui.app.Suspend(func() {
+ if err := os.Chdir(ui.currentDirPath); err != nil {
+ ui.showErr("Error changing directory", err)
+ return
+ }
+
+ if err := ui.exec(getShellBin(), nil, os.Environ()); err != nil {
+ ui.showErr("Error executing shell", err)
+ }
+ })
+}
+
+func stopProcess() error {
+ // chan for signal
+ sigChan := make(chan os.Signal, 1)
+ signal.Notify(sigChan, syscall.SIGCONT)
+ defer signal.Stop(sigChan)
+
+ err := syscall.Kill(syscall.Getpid(), syscall.SIGTSTP)
+ // wait continue
+ <-sigChan
+
+ return err
+}
--- /dev/null
+package tui
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestExecute(t *testing.T) {
+ err := Execute("true", []string{}, []string{})
+
+ assert.Nil(t, err)
+}
--- /dev/null
+package tui
+
+import (
+ "os"
+)
+
+func getShellBin() string {
+ shellbin, ok := os.LookupEnv("COMSPEC")
+ if !ok {
+ shellbin = "C:\\WINDOWS\\System32\\cmd.exe"
+ }
+ return shellbin
+}
+
+func (ui *UI) spawnShell() {
+ if ui.currentDir == nil {
+ return
+ }
+
+ ui.app.Stop()
+
+ if err := os.Chdir(ui.currentDirPath); err != nil {
+ ui.showErr("Error changing directory", err)
+ return
+ }
+ if err := ui.exec(getShellBin(), nil, os.Environ()); err != nil {
+ ui.showErr("Error executing shell", err)
+ }
+}
+
+func stopProcess() error {
+ return nil
+}
--- /dev/null
+package tui
+
+import (
+ "bytes"
+ "os"
+ "testing"
+
+ "github.com/dundee/gdu/v5/internal/testanalyze"
+ "github.com/dundee/gdu/v5/internal/testapp"
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestConfirmExport(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'E', 0))
+
+ assert.True(t, ui.pages.HasPage("export"))
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'n', 0))
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyEnter, 0, 0))
+
+ assert.True(t, ui.pages.HasPage("export"))
+}
+
+func TestExportAnalysis(t *testing.T) {
+ parentDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "parent",
+ },
+ Files: make([]fs.Item, 0, 1),
+ }
+ currentDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "sub",
+ Parent: parentDir,
+ },
+ }
+
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.currentDir = currentDir
+ ui.topDir = parentDir
+
+ ui.exportAnalysis()
+
+ assert.True(t, ui.pages.HasPage("exporting"))
+
+ <-ui.done
+
+ assert.FileExists(t, "export.json")
+ err := os.Remove("export.json")
+ assert.NoError(t, err)
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+}
+
+func TestExportAnalysisEsc(t *testing.T) {
+ parentDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "parent",
+ },
+ Files: make([]fs.Item, 0, 1),
+ }
+ currentDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "sub",
+ Parent: parentDir,
+ },
+ }
+
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.currentDir = currentDir
+ ui.topDir = parentDir
+
+ form := ui.confirmExport()
+ formInputFn := form.GetInputCapture()
+
+ assert.True(t, ui.pages.HasPage("export"))
+
+ formInputFn(tcell.NewEventKey(tcell.KeyEsc, 0, 0))
+
+ assert.False(t, ui.pages.HasPage("export"))
+}
+
+func TestExportAnalysisWithName(t *testing.T) {
+ parentDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "parent",
+ },
+ Files: make([]fs.Item, 0, 1),
+ }
+ currentDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "sub",
+ Parent: parentDir,
+ },
+ }
+
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.currentDir = currentDir
+ ui.topDir = parentDir
+
+ form := ui.confirmExport()
+ // formInputFn := form.GetInputCapture()
+ item := form.GetFormItemByLabel("File name")
+ inputFn := item.(*tview.InputField).InputHandler()
+
+ // send 'n' to input
+ inputFn(tcell.NewEventKey(tcell.KeyRune, 'n', 0), nil)
+ assert.Equal(t, "export.jsonn", ui.exportName)
+
+ assert.True(t, ui.pages.HasPage("export"))
+
+ form.GetButton(0).InputHandler()(tcell.NewEventKey(tcell.KeyEnter, 0, 0), nil)
+
+ assert.True(t, ui.pages.HasPage("exporting"))
+
+ <-ui.done
+
+ assert.FileExists(t, "export.jsonn")
+ err := os.Remove("export.jsonn")
+ assert.NoError(t, err)
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+}
+
+func TestExportAnalysisWithoutRights(t *testing.T) {
+ parentDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "parent",
+ },
+ Files: make([]fs.Item, 0, 1),
+ }
+ currentDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "sub",
+ Parent: parentDir,
+ },
+ }
+
+ _, err := os.Create("export.json")
+ assert.NoError(t, err)
+ err = os.Chmod("export.json", 0)
+ assert.NoError(t, err)
+ defer func() {
+ err = os.Chmod("export.json", 0o755)
+ assert.Nil(t, err)
+ err = os.Remove("export.json")
+ assert.NoError(t, err)
+ }()
+
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.currentDir = currentDir
+ ui.topDir = parentDir
+
+ ui.exportAnalysis()
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.True(t, ui.pages.HasPage("error"))
+}
--- /dev/null
+package tui
+
+import (
+ "path/filepath"
+ "strings"
+
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+func (ui *UI) rebuildFooter() {
+ ui.footer.Clear()
+ if ui.filteringInput != nil {
+ ui.footer.AddItem(ui.filteringInput, 0, 1, ui.filtering)
+ }
+ if ui.typeFilteringInput != nil {
+ ui.footer.AddItem(ui.typeFilteringInput, 0, 1, ui.typeFiltering)
+ }
+ ui.footer.AddItem(ui.footerLabel, 0, 5, false)
+}
+
+func (ui *UI) hideFilterInput() {
+ ui.filterValue = ""
+ ui.filteringInput = nil
+ ui.filtering = false
+ ui.rebuildFooter()
+ ui.app.SetFocus(ui.table)
+}
+
+func (ui *UI) showFilterInput() {
+ if ui.currentDir == nil {
+ return
+ }
+
+ if ui.filteringInput == nil {
+ ui.markedRows = make(map[int]struct{})
+
+ ui.filteringInput = tview.NewInputField()
+ ui.filteringInput.SetLabel("Name: ")
+
+ if !ui.UseColors {
+ ui.filteringInput.SetFieldBackgroundColor(
+ tcell.NewRGBColor(100, 100, 100),
+ )
+ ui.filteringInput.SetFieldTextColor(
+ tcell.NewRGBColor(255, 255, 255),
+ )
+ }
+
+ ui.filteringInput.SetChangedFunc(func(text string) {
+ ui.filterValue = text
+ ui.showDir()
+ })
+ ui.filteringInput.SetDoneFunc(func(key tcell.Key) {
+ if key == tcell.KeyESC {
+ ui.hideFilterInput()
+ ui.showDir()
+ } else {
+ ui.app.SetFocus(ui.table)
+ ui.filtering = false
+ }
+ })
+
+ ui.rebuildFooter()
+ }
+ ui.app.SetFocus(ui.filteringInput)
+ ui.filtering = true
+}
+
+func (ui *UI) hideTypeFilterInput() {
+ ui.typeFilterValue = ""
+ ui.typeFilteringInput = nil
+ ui.typeFiltering = false
+ ui.rebuildFooter()
+ ui.app.SetFocus(ui.table)
+}
+
+func (ui *UI) showTypeFilterInput() {
+ if ui.currentDir == nil {
+ return
+ }
+
+ if ui.typeFilteringInput == nil {
+ ui.markedRows = make(map[int]struct{})
+
+ ui.typeFilteringInput = tview.NewInputField()
+ ui.typeFilteringInput.SetLabel("Type: ")
+
+ if !ui.UseColors {
+ ui.typeFilteringInput.SetFieldBackgroundColor(
+ tcell.NewRGBColor(100, 100, 100),
+ )
+ ui.typeFilteringInput.SetFieldTextColor(
+ tcell.NewRGBColor(255, 255, 255),
+ )
+ }
+
+ ui.typeFilteringInput.SetChangedFunc(func(text string) {
+ ui.typeFilterValue = text
+ ui.showDir()
+ })
+ ui.typeFilteringInput.SetDoneFunc(func(key tcell.Key) {
+ if key == tcell.KeyESC {
+ ui.hideTypeFilterInput()
+ ui.showDir()
+ } else {
+ ui.app.SetFocus(ui.table)
+ ui.typeFiltering = false
+ }
+ })
+
+ ui.rebuildFooter()
+ }
+ ui.app.SetFocus(ui.typeFilteringInput)
+ ui.typeFiltering = true
+}
+
+// matchesTypeFilter returns true if the file name matches the type filter.
+// Directories always match. Files are matched by extension against the
+// comma-separated list in typeFilterValue.
+func (ui *UI) matchesTypeFilter(name string, isDir bool) bool {
+ if ui.typeFilterValue == "" {
+ return true
+ }
+ if isDir {
+ return true
+ }
+
+ ext := strings.ToLower(filepath.Ext(name))
+ if ext == "" {
+ return false
+ }
+ ext = strings.TrimPrefix(ext, ".")
+
+ for _, t := range strings.Split(ui.typeFilterValue, ",") {
+ t = strings.TrimSpace(strings.TrimPrefix(strings.ToLower(t), "."))
+ if t != "" && t == ext {
+ return true
+ }
+ }
+ return false
+}
--- /dev/null
+package tui
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/dundee/gdu/v5/internal/testanalyze"
+ "github.com/dundee/gdu/v5/internal/testapp"
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestFiltering(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ // mark the item for deletion
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, ' ', 0))
+ assert.Equal(t, 1, len(ui.markedRows))
+
+ ui.showFilterInput()
+ ui.filterValue = ""
+ ui.showDir()
+
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "ccc") // nothing is filtered
+ // marking should be dropped after sorting
+ assert.Equal(t, 0, len(ui.markedRows))
+
+ ui.filterValue = "aa"
+ ui.showDir()
+
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "aaa") // shows only cccc
+
+ ui.hideFilterInput()
+ ui.showDir()
+
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "ccc") // filtering reset
+}
+
+func TestFilteringWithoutCurrentDir(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+
+ ui.showFilterInput()
+
+ assert.False(t, ui.filtering)
+}
+
+func TestSwitchToTable(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, '/', 0)) // open filtering input
+ handler := ui.filteringInput.InputHandler()
+ handler(tcell.NewEventKey(tcell.KeyRune, 'n', 0), func(p tview.Primitive) {})
+ handler(tcell.NewEventKey(tcell.KeyRune, 'e', 0), func(p tview.Primitive) {})
+ handler(tcell.NewEventKey(tcell.KeyRune, 's', 0), func(p tview.Primitive) {})
+
+ ui.table.Select(0, 0)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0)) // we are filtering, should do nothing
+
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "nested")
+
+ handler(
+ tcell.NewEventKey(tcell.KeyTAB, ' ', 0), func(p tview.Primitive) {},
+ ) // switch focus to table
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyTAB, ' ', 0)) // switch back to input
+ handler(
+ tcell.NewEventKey(tcell.KeyEnter, ' ', 0), func(p tview.Primitive) {},
+ ) // switch back to table
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0)) // open nested dir
+
+ assert.Contains(t, ui.table.GetCell(1, 0).Text, "subnested")
+ assert.Empty(t, ui.filterValue) // filtering reset
+}
+
+func TestExitFiltering(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, '/', 0)) // open filtering input
+ handler := ui.filteringInput.InputHandler()
+ ui.filterValue = "xxx"
+ ui.showDir()
+
+ assert.Equal(t, ui.table.GetCell(0, 0).Text, "") // nothing is filtered
+
+ handler(
+ tcell.NewEventKey(tcell.KeyEsc, ' ', 0), func(p tview.Primitive) {},
+ ) // exit filtering
+
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "nested")
+ assert.Empty(t, ui.filterValue) // filtering reset
+}
+
+func createDirWithExtensions() *analyze.Dir {
+ dir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "test_dir",
+ Usage: 1e9,
+ Size: 1e9,
+ Mtime: time.Date(2021, 8, 27, 22, 23, 24, 0, time.UTC),
+ },
+ BasePath: ".",
+ ItemCount: 6,
+ }
+ subdir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "subdir",
+ Usage: 1e6,
+ Size: 1e6,
+ Mtime: time.Date(2021, 8, 27, 22, 23, 24, 0, time.UTC),
+ Parent: dir,
+ },
+ }
+ goFile := &analyze.File{
+ Name: "main.go",
+ Usage: 1e6,
+ Size: 1e6,
+ Mtime: time.Date(2021, 8, 27, 22, 23, 24, 0, time.UTC),
+ Parent: dir,
+ }
+ yamlFile := &analyze.File{
+ Name: "config.yaml",
+ Usage: 1e3,
+ Size: 1e3,
+ Mtime: time.Date(2021, 8, 27, 22, 23, 24, 0, time.UTC),
+ Parent: dir,
+ }
+ jsonFile := &analyze.File{
+ Name: "data.json",
+ Usage: 1e4,
+ Size: 1e4,
+ Mtime: time.Date(2021, 8, 27, 22, 23, 24, 0, time.UTC),
+ Parent: dir,
+ }
+ noExtFile := &analyze.File{
+ Name: "Makefile",
+ Usage: 500,
+ Size: 500,
+ Mtime: time.Date(2021, 8, 27, 22, 23, 24, 0, time.UTC),
+ Parent: dir,
+ }
+ dir.Files = fs.Files{subdir, goFile, yamlFile, jsonFile, noExtFile}
+ return dir
+}
+
+func TestTypeFiltering(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ dir := createDirWithExtensions()
+ ui.currentDir = dir
+ ui.topDir = dir
+ ui.topDirPath = dir.GetPath()
+ ui.showDir()
+
+ rowCount := ui.table.GetRowCount()
+ assert.Equal(t, 5, rowCount) // subdir + main.go + config.yaml + data.json + Makefile
+
+ // activate type filter for "go" files
+ ui.showTypeFilterInput()
+ assert.True(t, ui.typeFiltering)
+
+ ui.typeFilterValue = "go"
+ ui.showDir()
+
+ // should show: subdir (dirs always shown) + main.go
+ assert.True(t, tableContains(ui, "subdir"))
+ assert.True(t, tableContains(ui, "main.go"))
+ assert.False(t, tableContains(ui, "config.yaml"))
+ assert.False(t, tableContains(ui, "data.json"))
+ assert.False(t, tableContains(ui, "Makefile"))
+
+ ui.typeFilterValue = "go,yaml"
+ ui.showDir()
+
+ assert.True(t, tableContains(ui, "subdir"))
+ assert.True(t, tableContains(ui, "main.go"))
+ assert.True(t, tableContains(ui, "config.yaml"))
+ assert.False(t, tableContains(ui, "data.json"))
+
+ // hide type filter resets it
+ ui.hideTypeFilterInput()
+ ui.showDir()
+
+ assert.True(t, tableContains(ui, "main.go"))
+ assert.True(t, tableContains(ui, "config.yaml"))
+ assert.True(t, tableContains(ui, "data.json"))
+ assert.True(t, tableContains(ui, "Makefile"))
+ assert.Empty(t, ui.typeFilterValue)
+}
+
+func TestTypeFilteringWithoutCurrentDir(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+
+ ui.showTypeFilterInput()
+
+ assert.False(t, ui.typeFiltering)
+}
+
+func TestTypeFilteringKeyBinding(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'T', 0))
+
+ assert.True(t, ui.typeFiltering)
+ assert.NotNil(t, ui.typeFilteringInput)
+}
+
+func TestExitTypeFiltering(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'T', 0))
+ handler := ui.typeFilteringInput.InputHandler()
+ ui.typeFilterValue = "go"
+ ui.showDir()
+
+ handler(
+ tcell.NewEventKey(tcell.KeyEsc, ' ', 0), func(p tview.Primitive) {},
+ )
+
+ assert.Empty(t, ui.typeFilterValue)
+ assert.Nil(t, ui.typeFilteringInput)
+ assert.False(t, ui.typeFiltering)
+}
+
+func TestTypeFilterTabSwitch(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ // open type filter, confirm with Enter, then TAB should switch back
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'T', 0))
+ assert.True(t, ui.typeFiltering)
+
+ handler := ui.typeFilteringInput.InputHandler()
+ handler(
+ tcell.NewEventKey(tcell.KeyEnter, ' ', 0), func(p tview.Primitive) {},
+ )
+ assert.False(t, ui.typeFiltering) // focus returned to table
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyTAB, ' ', 0))
+ assert.True(t, ui.typeFiltering) // TAB should switch back to type filter
+}
+
+func TestBothFiltersCoexist(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ dir := createDirWithExtensions()
+ ui.currentDir = dir
+ ui.topDir = dir
+ ui.topDirPath = dir.GetPath()
+
+ // activate both filters
+ ui.showFilterInput()
+ ui.filterValue = "main"
+ ui.showTypeFilterInput()
+ ui.typeFilterValue = "go"
+ ui.showDir()
+
+ assert.True(t, tableContains(ui, "main.go")) // matches both name "main" and type "go"
+ assert.False(t, tableContains(ui, "subdir")) // dir name doesn't contain "main"
+ assert.False(t, tableContains(ui, "data.json")) // doesn't match name or type
+}
+
+func TestMatchesTypeFilter(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+
+ ui.typeFilterValue = "go"
+ assert.True(t, ui.matchesTypeFilter("main.go", false))
+ assert.False(t, ui.matchesTypeFilter("config.yaml", false))
+ assert.True(t, ui.matchesTypeFilter("subdir", true)) // dirs always match
+ assert.False(t, ui.matchesTypeFilter("Makefile", false)) // no extension
+
+ ui.typeFilterValue = "go,yaml"
+ assert.True(t, ui.matchesTypeFilter("main.go", false))
+ assert.True(t, ui.matchesTypeFilter("config.yaml", false))
+ assert.False(t, ui.matchesTypeFilter("data.json", false))
+
+ ui.typeFilterValue = ".go" // with leading dot
+ assert.True(t, ui.matchesTypeFilter("main.go", false))
+
+ ui.typeFilterValue = "GO" // case insensitive
+ assert.True(t, ui.matchesTypeFilter("main.go", false))
+
+ ui.typeFilterValue = "" // empty filter matches all
+ assert.True(t, ui.matchesTypeFilter("anything", false))
+}
+
+func TestTypeFilterInputNoColorAndChangedCallback(t *testing.T) {
+ app, simScreen := testapp.CreateTestAppWithSimScreen(80, 30)
+ defer simScreen.Fini()
+
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ dir := createDirWithExtensions()
+ ui.currentDir = dir
+ ui.topDir = dir
+ ui.topDirPath = dir.GetPath()
+ ui.showDir()
+
+ ui.showTypeFilterInput()
+ assert.NotNil(t, ui.typeFilteringInput)
+
+ handler := ui.typeFilteringInput.InputHandler()
+ handler(tcell.NewEventKey(tcell.KeyRune, 'g', 0), func(p tview.Primitive) {})
+ handler(tcell.NewEventKey(tcell.KeyRune, 'o', 0), func(p tview.Primitive) {})
+
+ assert.Equal(t, "go", ui.typeFilterValue)
+ assert.True(t, tableContains(ui, "main.go"))
+ assert.False(t, tableContains(ui, "config.yaml"))
+}
+
+func TestTypeFilterShowAgainKeepsExistingInput(t *testing.T) {
+ app, simScreen := testapp.CreateTestAppWithSimScreen(80, 30)
+ defer simScreen.Fini()
+
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ dir := createDirWithExtensions()
+ ui.currentDir = dir
+ ui.topDir = dir
+ ui.topDirPath = dir.GetPath()
+ ui.showDir()
+
+ ui.showTypeFilterInput()
+ original := ui.typeFilteringInput
+
+ ui.showTypeFilterInput()
+
+ assert.Equal(t, original, ui.typeFilteringInput)
+ assert.True(t, ui.typeFiltering)
+}
+
+func collectTableTexts(ui *UI) []string {
+ var texts []string
+ for i := 0; i < ui.table.GetRowCount(); i++ {
+ cell := ui.table.GetCell(i, 0)
+ if cell != nil {
+ texts = append(texts, cell.Text)
+ }
+ }
+ return texts
+}
+
+func tableContains(ui *UI, name string) bool {
+ for _, text := range collectTableTexts(ui) {
+ if strings.Contains(text, name) {
+ return true
+ }
+ }
+ return false
+}
--- /dev/null
+package tui
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/dundee/gdu/v5/internal/common"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/rivo/tview"
+)
+
+const (
+ blackOnWhite = "[black:white:-]"
+ whiteOnBlack = "[white:black:-]"
+
+ defaultColor = "[-::]"
+ defaultColorBold = "[::b]"
+)
+
+func (ui *UI) formatFileRow(item fs.Item, maxUsage, maxSize int64, marked, ignored bool) string {
+ part := 0
+ if !ignored {
+ if ui.ShowApparentSize {
+ if size := item.GetSize(); size > 0 {
+ part = int(float64(size) / float64(maxSize) * 100.0)
+ }
+ } else {
+ if usage := item.GetUsage(); usage > 0 {
+ part = int(float64(usage) / float64(maxUsage) * 100.0)
+ }
+ }
+ }
+
+ row := string(item.GetFlag())
+
+ numberColor := fmt.Sprintf(
+ "[%s::b]",
+ ui.resultRow.NumberColor,
+ )
+
+ if ui.UseColors && !marked && !ignored {
+ row += numberColor
+ } else {
+ row += defaultColorBold
+ }
+
+ if ui.ShowApparentSize {
+ row += fmt.Sprintf("%15s", ui.formatSize(item.GetSize(), false, true))
+ } else {
+ row += fmt.Sprintf("%15s", ui.formatSize(item.GetUsage(), false, true))
+ }
+
+ if ui.useOldSizeBar {
+ row += " " + getUsageGraphOld(part) + " "
+ } else {
+ row += getUsageGraph(part)
+ }
+
+ if ui.showItemCount {
+ if ui.UseColors && !marked && !ignored {
+ row += numberColor
+ } else {
+ row += defaultColorBold
+ }
+
+ countToDisplay := item.GetItemCount()
+ if item.IsDir() {
+ countToDisplay--
+ }
+ row += fmt.Sprintf("%11s ", ui.formatCount(countToDisplay))
+ }
+
+ if ui.showMtime {
+ if ui.UseColors && !marked && !ignored {
+ row += numberColor
+ } else {
+ row += defaultColorBold
+ }
+ row += fmt.Sprintf(
+ "%s "+defaultColor,
+ item.GetMtime().Format("2006-01-02 15:04:05"),
+ )
+ }
+
+ if len(ui.markedRows) > 0 {
+ if marked {
+ row += string('✓')
+ } else {
+ row += " "
+ }
+ row += " "
+ }
+
+ if item.IsDir() {
+ if ui.UseColors && !marked && !ignored {
+ row += fmt.Sprintf("[%s::b]/", ui.resultRow.DirectoryColor)
+ } else {
+ row += defaultColorBold + "/"
+ }
+ }
+ row += tview.Escape(item.GetName())
+ return row
+}
+
+// formatCollapsedRow formats a collapsed directory path for display
+func (ui *UI) formatCollapsedRow(collapsedPath *CollapsedPath, maxUsage, maxSize int64, marked, ignored bool) string {
+ // Use the deepest directory's stats for display
+ item := collapsedPath.DeepestDir
+
+ part := 0
+ if !ignored {
+ if ui.ShowApparentSize {
+ if size := item.GetSize(); size > 0 {
+ part = int(float64(size) / float64(maxSize) * 100.0)
+ }
+ } else {
+ if usage := item.GetUsage(); usage > 0 {
+ part = int(float64(usage) / float64(maxUsage) * 100.0)
+ }
+ }
+ }
+
+ row := string(item.GetFlag())
+
+ numberColor := fmt.Sprintf(
+ "[%s::b]",
+ ui.resultRow.NumberColor,
+ )
+
+ if ui.UseColors && !marked && !ignored {
+ row += numberColor
+ } else {
+ row += defaultColorBold
+ }
+
+ if ui.ShowApparentSize {
+ row += fmt.Sprintf("%15s", ui.formatSize(item.GetSize(), false, true))
+ } else {
+ row += fmt.Sprintf("%15s", ui.formatSize(item.GetUsage(), false, true))
+ }
+
+ if ui.useOldSizeBar {
+ row += " " + getUsageGraphOld(part) + " "
+ } else {
+ row += getUsageGraph(part)
+ }
+
+ if ui.showItemCount {
+ if ui.UseColors && !marked && !ignored {
+ row += numberColor
+ } else {
+ row += defaultColorBold
+ }
+
+ countToDisplay := item.GetItemCount()
+ if item.IsDir() {
+ countToDisplay--
+ }
+ row += fmt.Sprintf("%11s ", ui.formatCount(countToDisplay))
+ }
+
+ if ui.showMtime {
+ if ui.UseColors && !marked && !ignored {
+ row += numberColor
+ } else {
+ row += defaultColorBold
+ }
+ row += fmt.Sprintf(
+ "%s "+defaultColor,
+ item.GetMtime().Format("2006-01-02 15:04:05"),
+ )
+ }
+
+ if len(ui.markedRows) > 0 {
+ if marked {
+ row += string('✓')
+ } else {
+ row += " "
+ }
+ row += " "
+ }
+
+ // Always display as directory with special formatting for collapsed path
+ if ui.UseColors && !marked && !ignored {
+ row += fmt.Sprintf("[%s::b]/", ui.resultRow.DirectoryColor)
+ } else {
+ row += defaultColorBold + "/"
+ }
+
+ // Display the collapsed path (e.g., "a/b/c")
+ row += tview.Escape(collapsedPath.DisplayName)
+ return row
+}
+
+func (ui *UI) formatSize(size int64, reverseColor, transparentBg bool) string {
+ var color string
+ if reverseColor {
+ if ui.UseColors {
+ color = fmt.Sprintf(
+ "[%s:%s:-]",
+ ui.footerTextColor,
+ ui.footerBackgroundColor,
+ )
+ } else {
+ color = blackOnWhite
+ }
+ } else {
+ if transparentBg {
+ color = defaultColor
+ } else {
+ color = whiteOnBlack
+ }
+ }
+
+ if ui.UseSIPrefix {
+ return formatWithDecPrefix(size, color)
+ }
+ return formatWithBinPrefix(float64(size), color)
+}
+
+func (ui *UI) formatCount(count int64) string {
+ row := ""
+ color := defaultColor
+ count64 := float64(count)
+
+ switch {
+ case count64 >= common.G:
+ row += fmt.Sprintf("%.1f%sG", float64(count)/float64(common.G), color)
+ case count64 >= common.M:
+ row += fmt.Sprintf("%.1f%sM", float64(count)/float64(common.M), color)
+ case count64 >= common.K:
+ row += fmt.Sprintf("%.1f%sk", float64(count)/float64(common.K), color)
+ default:
+ row += fmt.Sprintf("%d%s", count, color)
+ }
+ return row
+}
+
+func formatWithBinPrefix(fsize float64, color string) string {
+ asize := math.Abs(fsize)
+
+ switch {
+ case asize >= common.Ei:
+ return fmt.Sprintf("%.1f%s EiB", fsize/common.Ei, color)
+ case asize >= common.Pi:
+ return fmt.Sprintf("%.1f%s PiB", fsize/common.Pi, color)
+ case asize >= common.Ti:
+ return fmt.Sprintf("%.1f%s TiB", fsize/common.Ti, color)
+ case asize >= common.Gi:
+ return fmt.Sprintf("%.1f%s GiB", fsize/common.Gi, color)
+ case asize >= common.Mi:
+ return fmt.Sprintf("%.1f%s MiB", fsize/common.Mi, color)
+ case asize >= common.Ki:
+ return fmt.Sprintf("%.1f%s KiB", fsize/common.Ki, color)
+ default:
+ return fmt.Sprintf("%d%s B", int64(fsize), color)
+ }
+}
+
+func formatWithDecPrefix(size int64, color string) string {
+ fsize := float64(size)
+ asize := math.Abs(fsize)
+ switch {
+ case asize >= common.E:
+ return fmt.Sprintf("%.1f%s EB", fsize/common.E, color)
+ case asize >= common.P:
+ return fmt.Sprintf("%.1f%s PB", fsize/common.P, color)
+ case asize >= common.T:
+ return fmt.Sprintf("%.1f%s TB", fsize/common.T, color)
+ case asize >= common.G:
+ return fmt.Sprintf("%.1f%s GB", fsize/common.G, color)
+ case asize >= common.M:
+ return fmt.Sprintf("%.1f%s MB", fsize/common.M, color)
+ case asize >= common.K:
+ return fmt.Sprintf("%.1f%s kB", fsize/common.K, color)
+ default:
+ return fmt.Sprintf("%d%s B", size, color)
+ }
+}
--- /dev/null
+package tui
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/dundee/gdu/v5/internal/testapp"
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestFormatSize(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, false, false, false)
+
+ assert.Equal(t, "1[white:black:-] B", ui.formatSize(1, false, false))
+ assert.Equal(t, "1.0[white:black:-] KiB", ui.formatSize(1<<10, false, false))
+ assert.Equal(t, "1.0[white:black:-] MiB", ui.formatSize(1<<20, false, false))
+ assert.Equal(t, "1.0[white:black:-] GiB", ui.formatSize(1<<30, false, false))
+ assert.Equal(t, "1.0[white:black:-] TiB", ui.formatSize(1<<40, false, false))
+ assert.Equal(t, "1.0[white:black:-] PiB", ui.formatSize(1<<50, false, false))
+ assert.Equal(t, "1.0[white:black:-] EiB", ui.formatSize(1<<60, false, false))
+ assert.Equal(t, "-1.0[white:black:-] KiB", ui.formatSize(-1<<10, false, false))
+}
+
+func TestFormatSizeDec(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, false, false, true)
+
+ assert.Equal(t, "1[white:black:-] B", ui.formatSize(1, false, false))
+ assert.Equal(t, "1.0[white:black:-] kB", ui.formatSize(1<<10, false, false))
+ assert.Equal(t, "1.0[white:black:-] MB", ui.formatSize(1<<20, false, false))
+ assert.Equal(t, "1.1[white:black:-] GB", ui.formatSize(1<<30, false, false))
+ assert.Equal(t, "1.1[white:black:-] TB", ui.formatSize(1<<40, false, false))
+ assert.Equal(t, "1.1[white:black:-] PB", ui.formatSize(1<<50, false, false))
+ assert.Equal(t, "1.2[white:black:-] EB", ui.formatSize(1<<60, false, false))
+ assert.Equal(t, "-1.0[white:black:-] kB", ui.formatSize(-1<<10, false, false))
+}
+
+func TestFormatCount(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, false, false, false)
+
+ assert.Equal(t, "1[-::]", ui.formatCount(1))
+ assert.Equal(t, "1.0[-::]k", ui.formatCount(1<<10))
+ assert.Equal(t, "1.0[-::]M", ui.formatCount(1<<20))
+ assert.Equal(t, "1.1[-::]G", ui.formatCount(1<<30))
+}
+
+func TestEscapeName(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, false, false, false)
+
+ dir := &analyze.Dir{
+ File: &analyze.File{
+ Usage: 10,
+ },
+ }
+
+ file := &analyze.File{
+ Name: "Aaa [red] bbb",
+ Parent: dir,
+ Usage: 10,
+ }
+
+ assert.Contains(t, ui.formatFileRow(file, file.GetUsage(), file.GetSize(), false, false), "Aaa [red[] bbb")
+}
+
+func TestMarked(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, false, false, false)
+ ui.markedRows[0] = struct{}{}
+ ui.useOldSizeBar = true
+
+ dir := &analyze.Dir{
+ File: &analyze.File{
+ Usage: 10,
+ },
+ }
+
+ file := &analyze.File{
+ Name: "Aaa",
+ Parent: dir,
+ Usage: 10,
+ }
+
+ assert.Contains(t, ui.formatFileRow(file, file.GetUsage(), file.GetSize(), true, false), "✓ Aaa")
+ assert.Contains(t, ui.formatFileRow(file, file.GetUsage(), file.GetSize(), false, false), "[##########] Aaa")
+}
+
+func TestIgnored(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, false, false, false)
+ ui.ignoredRows[0] = struct{}{}
+ ui.useOldSizeBar = true
+
+ dir := &analyze.Dir{
+ File: &analyze.File{
+ Usage: 10,
+ },
+ }
+
+ file := &analyze.File{
+ Name: "Aaa",
+ Parent: dir,
+ Usage: 10,
+ }
+
+ assert.Contains(t, ui.formatFileRow(file, file.GetUsage(), file.GetSize(), false, true), "[ ] Aaa")
+ assert.Contains(t, ui.formatFileRow(file, file.GetUsage(), file.GetSize(), false, false), "[##########] Aaa")
+}
+
+func TestSizeBar(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, false, false, false)
+
+ dir := &analyze.Dir{
+ File: &analyze.File{
+ Usage: 10,
+ },
+ }
+
+ file := &analyze.File{
+ Name: "Aaa",
+ Parent: dir,
+ Usage: 10,
+ }
+
+ assert.Contains(t, ui.formatFileRow(file, file.GetUsage(), file.GetSize(), false, false), "██████████▏Aaa")
+}
+
+func TestOldSizeBar(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, false, false, false)
+ ui.markedRows[0] = struct{}{}
+ ui.useOldSizeBar = true
+
+ dir := &analyze.Dir{
+ File: &analyze.File{
+ Usage: 20,
+ },
+ }
+
+ file := &analyze.File{
+ Name: "Aaa",
+ Parent: dir,
+ Usage: 10,
+ }
+
+ assert.Contains(t, ui.formatFileRow(file, dir.GetUsage(), dir.GetSize(), false, false), "[##### ] Aaa")
+}
--- /dev/null
+package tui
+
+import (
+ "fmt"
+ "path/filepath"
+ "time"
+
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+var analyzeParentPath = func(ui *UI, path string, parentDir fs.Item) error {
+ return ui.AnalyzePath(path, parentDir)
+}
+
+func (ui *UI) keyPressed(key *tcell.EventKey) *tcell.EventKey {
+ if ui.handleCtrlZ(key) == nil {
+ return nil
+ }
+
+ if ui.pages.HasPage("file") || ui.pages.HasPage("export") {
+ return key // send event to primitive
+ }
+ if ui.filtering || ui.typeFiltering {
+ return key
+ }
+
+ key = ui.handleClosingModals(key)
+ if key == nil {
+ return nil
+ }
+ key = ui.handleInfoPageEvents(key)
+ if key == nil {
+ return nil
+ }
+ key = ui.handleQuit(key)
+ if key == nil {
+ return nil
+ }
+
+ if ui.pages.HasPage("confirm") {
+ return ui.handleConfirmation(key)
+ }
+
+ if ui.pages.HasPage("progress") ||
+ ui.pages.HasPage("deleting") ||
+ ui.pages.HasPage("emptying") {
+ return key
+ }
+
+ key = ui.handleHelp(key)
+ if key == nil {
+ return nil
+ }
+
+ if ui.pages.HasPage("help") {
+ return key
+ }
+
+ key = ui.handleShell(key)
+ if key == nil {
+ return nil
+ }
+
+ key = ui.handleLeftRight(key)
+ if key == nil {
+ return nil
+ }
+
+ key = ui.handleFiltering(key)
+ if key == nil {
+ return nil
+ }
+
+ return ui.handleMainActions(key)
+}
+
+func (ui *UI) handleClosingModals(key *tcell.EventKey) *tcell.EventKey {
+ if key.Key() == tcell.KeyEsc || key.Rune() == 'q' {
+ if ui.pages.HasPage("help") {
+ ui.pages.RemovePage("help")
+ ui.app.SetFocus(ui.table)
+ return nil
+ }
+ if ui.pages.HasPage("info") {
+ ui.pages.RemovePage("info")
+ ui.app.SetFocus(ui.table)
+ return nil
+ }
+ }
+ return key
+}
+
+func (ui *UI) handleConfirmation(key *tcell.EventKey) *tcell.EventKey {
+ if key.Rune() == 'h' {
+ return tcell.NewEventKey(tcell.KeyLeft, 0, 0)
+ }
+ if key.Rune() == 'l' {
+ return tcell.NewEventKey(tcell.KeyRight, 0, 0)
+ }
+ return key
+}
+
+func (ui *UI) handleInfoPageEvents(key *tcell.EventKey) *tcell.EventKey {
+ if ui.pages.HasPage("info") {
+ switch key.Rune() {
+ case 'i':
+ ui.pages.RemovePage("info")
+ ui.app.SetFocus(ui.table)
+ return nil
+ case '?':
+ return nil
+ }
+
+ if key.Key() == tcell.KeyUp ||
+ key.Key() == tcell.KeyDown ||
+ key.Rune() == 'j' ||
+ key.Rune() == 'k' {
+ row, column := ui.table.GetSelection()
+ if (key.Key() == tcell.KeyUp || key.Rune() == 'k') && row > 0 {
+ row--
+ } else if (key.Key() == tcell.KeyDown || key.Rune() == 'j') &&
+ row+1 < ui.table.GetRowCount() {
+ row++
+ }
+ ui.table.Select(row, column)
+ }
+ ui.showInfo() // refresh file info after any change
+ }
+ return key
+}
+
+// handle ctrl+z job control
+func (ui *UI) handleCtrlZ(key *tcell.EventKey) *tcell.EventKey {
+ if key.Key() == tcell.KeyCtrlZ {
+ ui.app.Suspend(func() {
+ termApp := ui.app.(*tview.Application)
+ termApp.Lock()
+ defer termApp.Unlock()
+
+ err := stopProcess()
+ if err != nil {
+ ui.showErr("Error sending STOP signal", err)
+ }
+ })
+ return nil
+ }
+
+ return key
+}
+
+func (ui *UI) handleQuit(key *tcell.EventKey) *tcell.EventKey {
+ switch key.Rune() {
+ case 'Q':
+ ui.app.Stop()
+ fmt.Fprintf(ui.output, "%s\n", ui.currentDirPath)
+ return nil
+ case 'q':
+ ui.app.Stop()
+ return nil
+ }
+ return key
+}
+
+func (ui *UI) handleHelp(key *tcell.EventKey) *tcell.EventKey {
+ if key.Rune() == '?' {
+ if ui.pages.HasPage("help") {
+ ui.pages.RemovePage("help")
+ ui.app.SetFocus(ui.table)
+ return nil
+ }
+ ui.showHelp()
+ return nil
+ }
+ return key
+}
+
+func (ui *UI) handleShell(key *tcell.EventKey) *tcell.EventKey {
+ if key.Rune() == 'b' {
+ if ui.isInArchive() {
+ ui.showErr("Spawning shell is not supported in archives", nil)
+ return nil
+ }
+ if ui.noSpawnShell {
+ previousHeaderText := ui.header.GetText(false)
+
+ // show feedback to user
+ ui.header.SetText(" Shell spawning is disabled!")
+
+ go func() {
+ time.Sleep(2 * time.Second)
+ ui.app.QueueUpdateDraw(func() {
+ ui.header.Clear()
+ ui.header.SetText(previousHeaderText)
+ })
+ }()
+
+ return nil
+ }
+ ui.spawnShell()
+ return nil
+ }
+ return key
+}
+
+func (ui *UI) handleLeftRight(key *tcell.EventKey) *tcell.EventKey {
+ if key.Rune() == 'h' || key.Key() == tcell.KeyLeft {
+ ui.handleLeft()
+ return nil
+ }
+
+ if key.Rune() == 'l' || key.Key() == tcell.KeyRight {
+ ui.handleRight()
+ return nil
+ }
+ return key
+}
+
+func (ui *UI) handleFiltering(key *tcell.EventKey) *tcell.EventKey {
+ if key.Key() != tcell.KeyTab {
+ return key
+ }
+ if ui.filteringInput != nil {
+ ui.filtering = true
+ ui.app.SetFocus(ui.filteringInput)
+ return nil
+ }
+ if ui.typeFilteringInput != nil {
+ ui.typeFiltering = true
+ ui.app.SetFocus(ui.typeFilteringInput)
+ return nil
+ }
+ return key
+}
+
+// nolint: funlen // Why: there's a lot of options to handle
+func (ui *UI) handleMainActions(key *tcell.EventKey) *tcell.EventKey {
+ switch key.Rune() {
+ case 'd':
+ if ui.isInArchive() {
+ ui.showErr("Deletion is not supported in archives", nil)
+ return nil
+ }
+ ui.handleDelete(false)
+ case 'e':
+ if ui.isInArchive() {
+ ui.showErr("Deletion is not supported in archives", nil)
+ return nil
+ }
+ ui.handleDelete(true)
+ case 'v':
+ if ui.isInArchive() {
+ ui.showErr("Viewing content is not supported in archives", nil)
+ return nil
+ }
+ if ui.noViewFile {
+ previousHeaderText := ui.header.GetText(false)
+
+ ui.header.SetText(" Viewing files is disabled!")
+
+ go func() {
+ time.Sleep(2 * time.Second)
+ ui.app.QueueUpdateDraw(func() {
+ ui.header.Clear()
+ ui.header.SetText(previousHeaderText)
+ })
+ }()
+
+ return nil
+ }
+ ui.showFile()
+ case 'o':
+ if ui.noSpawnShell {
+ previousHeaderText := ui.header.GetText(false)
+
+ // show feedback to user
+ ui.header.SetText(" Opening items is disabled!")
+
+ go func() {
+ time.Sleep(2 * time.Second)
+ ui.app.QueueUpdateDraw(func() {
+ ui.header.Clear()
+ ui.header.SetText(previousHeaderText)
+ })
+ }()
+ return nil
+ }
+ ui.openItem()
+ case 'i':
+ ui.showInfo()
+ case 'a', 'B', 'c', 'm':
+ ui.handleToggles(key)
+ case 'r':
+ if ui.currentDir != nil {
+ ui.rescanDir()
+ }
+ case 'E':
+ ui.confirmExport()
+ return nil
+ case 's', 'C', 'n', 'M':
+ ui.handleSorting(key)
+ case '/':
+ ui.showFilterInput()
+ return nil
+ case 'T':
+ ui.showTypeFilterInput()
+ return nil
+ case ' ':
+ ui.handleMark()
+ case 'I':
+ ui.ignoreItem()
+ }
+ return key
+}
+
+func (ui *UI) handleToggles(key *tcell.EventKey) {
+ switch key.Rune() {
+ case 'a':
+ ui.ShowApparentSize = !ui.ShowApparentSize
+ case 'B':
+ ui.ShowRelativeSize = !ui.ShowRelativeSize
+ case 'c':
+ ui.showItemCount = !ui.showItemCount
+ case 'm':
+ ui.showMtime = !ui.showMtime
+ }
+ if ui.currentDir != nil {
+ row, column := ui.table.GetSelection()
+ ui.showDir()
+ ui.table.Select(row, column)
+ }
+}
+
+func (ui *UI) handleSorting(key *tcell.EventKey) {
+ switch key.Rune() {
+ case 's':
+ ui.setSorting("size")
+ case 'C':
+ ui.setSorting("itemCount")
+ case 'n':
+ ui.setSorting("name")
+ case 'M':
+ ui.setSorting("mtime")
+ }
+}
+
+func (ui *UI) handleLeft() {
+ if ui.currentDirPath == ui.topDirPath {
+ if ui.devices != nil {
+ ui.currentDir = nil
+ err := ui.ListDevices(ui.getter)
+ if err != nil {
+ ui.showErr("Error listing devices", err)
+ }
+ } else if ui.browseParentDirs {
+ ui.analyzeParentOfTopDir()
+ }
+ return
+ }
+ if ui.currentDir != nil {
+ ui.fileItemSelected(0, 0)
+ }
+}
+
+func (ui *UI) analyzeParentOfTopDir() {
+ if ui.currentDir == nil || ui.isInArchive() {
+ return
+ }
+
+ currentPath := ui.currentDir.GetPath()
+ parentPath := filepath.Dir(currentPath)
+ if parentPath == currentPath {
+ return
+ }
+
+ ui.Analyzer.ResetProgress()
+ ui.linkedItems = make(fs.HardLinkedItems)
+
+ if err := analyzeParentPath(ui, parentPath, nil); err != nil {
+ ui.showErr("Error analyzing parent directory", err)
+ }
+}
+
+func (ui *UI) handleRight() {
+ row, column := ui.table.GetSelection()
+ if ui.currentDirPath != ui.topDirPath && row == 0 { // do not select /..
+ return
+ }
+
+ if ui.currentDir != nil {
+ ui.fileItemSelected(row, column)
+ } else {
+ ui.deviceItemSelected(row, column)
+ }
+}
+
+func (ui *UI) handleDelete(shouldEmpty bool) {
+ if ui.currentDir == nil {
+ return
+ }
+ // do not allow deleting parent dir
+ row, column := ui.table.GetSelection()
+ selectedFile, ok := ui.table.GetCell(row, column).GetReference().(fs.Item)
+ if !ok || selectedFile == ui.currentDir.GetParent() {
+ return
+ }
+
+ if ui.askBeforeDelete {
+ ui.confirmDeletion(shouldEmpty)
+ } else {
+ ui.delete(shouldEmpty)
+ }
+}
+
+func (ui *UI) handleMark() {
+ if ui.currentDir == nil {
+ return
+ }
+ // do not allow deleting parent dir
+ row, column := ui.table.GetSelection()
+ selectedFile, ok := ui.table.GetCell(row, column).GetReference().(fs.Item)
+ if !ok || selectedFile == ui.currentDir.GetParent() {
+ return
+ }
+
+ ui.fileItemMarked(row)
+}
+
+func (ui *UI) ignoreItem() {
+ if ui.currentDir == nil {
+ return
+ }
+ // do not allow ignoring parent dir
+ row, column := ui.table.GetSelection()
+ selectedFile, ok := ui.table.GetCell(row, column).GetReference().(fs.Item)
+ if !ok || selectedFile == ui.currentDir.GetParent() {
+ return
+ }
+
+ if _, ok := ui.ignoredRows[row]; ok {
+ delete(ui.ignoredRows, row)
+ } else {
+ ui.ignoredRows[row] = struct{}{}
+ }
+ ui.showDir()
+ // select next row if possible
+ ui.table.Select(min(row+1, ui.table.GetRowCount()-1), 0)
+}
--- /dev/null
+package tui
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/dundee/gdu/v5/internal/testanalyze"
+ "github.com/dundee/gdu/v5/internal/testapp"
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/dundee/gdu/v5/pkg/device"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+ "github.com/stretchr/testify/assert"
+)
+
+type devicesInfoGetterErrMock struct{}
+
+func (m devicesInfoGetterErrMock) GetDevicesInfo() (device.Devices, error) {
+ return nil, fmt.Errorf("failed getting devices")
+}
+
+func (m devicesInfoGetterErrMock) GetMounts() (device.Devices, error) {
+ return nil, fmt.Errorf("failed getting mounts")
+}
+
+func TestShowHelp(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, '?', 0))
+
+ assert.True(t, ui.pages.HasPage("help"))
+}
+
+func TestCloseHelp(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.showHelp()
+
+ assert.True(t, ui.pages.HasPage("help"))
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyEsc, 'q', 0))
+
+ assert.False(t, ui.pages.HasPage("help"))
+}
+
+func TestCloseHelpWithQuestionMark(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.showHelp()
+
+ assert.True(t, ui.pages.HasPage("help"))
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, '?', 0))
+
+ assert.False(t, ui.pages.HasPage("help"))
+}
+
+func TestKeyWhileDeleting(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+
+ modal := tview.NewModal().SetText("Deleting...")
+ ui.pages.AddPage("deleting", modal, true, true)
+
+ key := ui.keyPressed(tcell.NewEventKey(tcell.KeyEnter, ' ', 0))
+ assert.Equal(t, tcell.KeyEnter, key.Key())
+}
+
+func TestLeftRightKeyWhileConfirm(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+
+ modal := tview.NewModal().SetText("Really?")
+ ui.pages.AddPage("confirm", modal, true, true)
+
+ key := ui.keyPressed(tcell.NewEventKey(tcell.KeyLeft, 0, 0))
+ assert.Equal(t, tcell.KeyLeft, key.Key())
+ key = ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 0, 0))
+ assert.Equal(t, tcell.KeyRight, key.Key())
+ key = ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'h', 0))
+ assert.Equal(t, tcell.KeyLeft, key.Key())
+ key = ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'l', 0))
+ assert.Equal(t, tcell.KeyRight, key.Key())
+}
+
+func TestMoveLeftRight(t *testing.T) {
+ origWD, err := os.Getwd()
+ assert.Nil(t, err)
+
+ err = os.Chdir(t.TempDir())
+ assert.Nil(t, err)
+ defer func() {
+ err := os.Chdir(origWD)
+ assert.Nil(t, err)
+ }()
+
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.done = make(chan struct{})
+ ui.browseParentDirs = true
+ err = ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ ui.table.Select(0, 0)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+
+ assert.Equal(t, "nested", ui.currentDir.GetName())
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0)) // try /.. first
+
+ assert.Equal(t, "nested", ui.currentDir.GetName())
+
+ ui.table.Select(1, 0)
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+
+ assert.Equal(t, "subnested", ui.currentDir.GetName())
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyLeft, 'h', 0))
+
+ assert.Equal(t, "nested", ui.currentDir.GetName())
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyLeft, 'h', 0))
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyLeft, 'h', 0))
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, filepath.Dir("test_dir"), ui.currentDirPath)
+}
+
+func TestMoveRightOnDevice(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ ui.SetIgnoreDirPaths([]string{})
+ err := ui.ListDevices(getDevicesInfoMock())
+ assert.Nil(t, err)
+
+ ui.table.Select(1, 0)
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ // go back to list of devices
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyLeft, 'h', 0))
+
+ assert.Nil(t, ui.currentDir)
+ assert.Equal(t, "/dev/root", ui.table.GetCell(1, 0).GetReference().(*device.Device).Name)
+}
+
+func TestHandleLeftShowsErrorWhenListDevicesFails(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.currentDirPath = "test_dir"
+ ui.topDirPath = "test_dir"
+ ui.devices = device.Devices{&device.Device{Name: "x"}}
+ ui.getter = devicesInfoGetterErrMock{}
+
+ ui.handleLeft()
+
+ assert.True(t, ui.pages.HasPage("error"))
+}
+
+func TestHandleLeftAtTopDirDoesNothingWhenBrowseParentDirsDisabled(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.currentDirPath = "test_dir"
+ ui.topDirPath = "test_dir"
+ ui.currentDir = &analyze.Dir{
+ File: &analyze.File{Name: "test_dir"},
+ BasePath: ".",
+ }
+
+ ui.handleLeft()
+
+ assert.False(t, ui.pages.HasPage("error"))
+ assert.Equal(t, "test_dir", ui.currentDirPath)
+}
+
+func TestAnalyzeParentOfTopDirNilCurrentDir(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.currentDir = nil
+
+ ui.analyzeParentOfTopDir()
+
+ assert.False(t, ui.pages.HasPage("error"))
+}
+
+func TestAnalyzeParentOfTopDirInArchive(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.currentDir = &analyze.ZipDir{Dir: &analyze.Dir{}}
+
+ ui.analyzeParentOfTopDir()
+
+ assert.False(t, ui.pages.HasPage("error"))
+}
+
+func TestAnalyzeParentOfTopDirAtFilesystemRoot(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.currentDir = &analyze.Dir{File: &analyze.File{Name: "/"}}
+
+ ui.analyzeParentOfTopDir()
+
+ assert.False(t, ui.pages.HasPage("error"))
+}
+
+func TestAnalyzeParentOfTopDirShowsErrorWhenAnalyzeFails(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.currentDir = &analyze.Dir{
+ File: &analyze.File{Name: "test_dir"},
+ BasePath: ".",
+ }
+ origAnalyzeParentPath := analyzeParentPath
+ t.Cleanup(func() {
+ analyzeParentPath = origAnalyzeParentPath
+ })
+ analyzeParentPath = func(ui *UI, path string, parentDir fs.Item) error {
+ return errors.New("boom")
+ }
+
+ ui.analyzeParentOfTopDir()
+
+ assert.True(t, ui.pages.HasPage("error"))
+}
+
+func TestStop(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+
+ key := ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'q', 0))
+ assert.Nil(t, key)
+}
+
+func TestStopWithPrintingPath(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ buff := &bytes.Buffer{}
+ ui := CreateUI(app, simScreen, buff, true, true, false, false)
+
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ key := ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'Q', 0))
+ assert.Nil(t, key)
+
+ assert.Equal(t, "test_dir\n", buff.String())
+}
+
+func TestSpawnShell(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ buff := &bytes.Buffer{}
+ ui := CreateUI(app, simScreen, buff, true, true, false, false)
+ called := false
+ ui.exec = func(argv0 string, argv, envv []string) error {
+ called = true
+ return nil
+ }
+
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ key := ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'b', 0))
+ assert.Nil(t, key)
+ assert.True(t, called)
+}
+
+func TestSpawnShellWithoutDir(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ buff := &bytes.Buffer{}
+ ui := CreateUI(app, simScreen, buff, true, true, false, false)
+ called := false
+ ui.exec = func(argv0 string, argv, envv []string) error {
+ called = true
+ return nil
+ }
+
+ ui.done = make(chan struct{})
+
+ key := ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'b', 0))
+ assert.Nil(t, key)
+ assert.False(t, called)
+}
+
+func TestSpawnShellWithWrongDir(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ buff := &bytes.Buffer{}
+ ui := CreateUI(app, simScreen, buff, true, true, false, false)
+ called := false
+ ui.exec = func(argv0 string, argv, envv []string) error {
+ called = true
+ return nil
+ }
+
+ ui.done = make(chan struct{})
+ ui.currentDir = &analyze.Dir{}
+ ui.currentDirPath = "/xxxxx"
+
+ key := ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'b', 0))
+ assert.Nil(t, key)
+ assert.False(t, called)
+ assert.True(t, ui.pages.HasPage("error"))
+}
+
+func TestSpawnShellWithError(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ buff := &bytes.Buffer{}
+ ui := CreateUI(app, simScreen, buff, true, true, false, false)
+ called := false
+ ui.exec = func(argv0 string, argv, envv []string) error {
+ called = true
+ return errors.New("wrong shell")
+ }
+
+ ui.done = make(chan struct{})
+ ui.currentDir = &analyze.Dir{}
+ ui.currentDirPath = "."
+
+ key := ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'b', 0))
+ assert.Nil(t, key)
+ assert.True(t, called)
+ assert.True(t, ui.pages.HasPage("error"))
+}
+
+func TestSpawnShellWithNoSpawnShell(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ buff := &bytes.Buffer{}
+ ui := CreateUI(app, simScreen, buff, true, true, false, false)
+ called := false
+ ui.exec = func(argv0 string, argv, envv []string) error {
+ called = true
+ return nil
+ }
+
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ ui.SetNoSpawnShell()
+ key := ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'b', 0))
+ assert.Nil(t, key)
+ assert.False(t, called)
+}
+
+func TestOpenItemWithNoSpawnShell(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ buff := &bytes.Buffer{}
+ ui := CreateUI(app, simScreen, buff, true, true, false, false)
+
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ ui.SetNoSpawnShell()
+ key := ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'o', 0))
+ assert.Nil(t, key)
+}
+
+func TestShowConfirm(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ ui.table.Select(1, 0)
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'd', 0))
+
+ assert.True(t, ui.pages.HasPage("confirm"))
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, '?', 0))
+
+ assert.False(t, ui.pages.HasPage("help"))
+}
+
+func TestDeleteEmpty(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+
+ key := ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'd', 0))
+ assert.NotNil(t, key)
+}
+
+func TestMarkEmpty(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+
+ key := ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, ' ', 0))
+ assert.NotNil(t, key)
+}
+
+func TestIgnoreEmpty(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+
+ key := ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'I', 0))
+ assert.NotNil(t, key)
+}
+
+func TestDelete(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ ui.askBeforeDelete = false
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'd', 0))
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.NoDirExists(t, "test_dir/nested")
+}
+
+func TestDeleteWithNoDelete(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+
+ ui.SetNoDelete()
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'd', 0))
+
+ assert.DirExists(t, "test_dir/nested")
+}
+
+func TestDeleteMarked(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ ui.askBeforeDelete = false
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, ' ', 0))
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'd', 0))
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.NoDirExists(t, "test_dir/nested")
+}
+
+func TestDeleteParent(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ ui.askBeforeDelete = false
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'd', 0))
+
+ assert.DirExists(t, "test_dir/nested")
+}
+
+func TestMarkParent(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ ui.askBeforeDelete = false
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, ' ', 0))
+
+ assert.Equal(t, len(ui.markedRows), 0)
+}
+
+func TestIgnoreParent(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ ui.askBeforeDelete = false
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'I', 0))
+
+ assert.Equal(t, len(ui.ignoredRows), 0)
+}
+
+func TestEmptyDir(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ ui.askBeforeDelete = false
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'e', 0))
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.DirExists(t, "test_dir/nested")
+ assert.NoDirExists(t, "test_dir/nested/subnested")
+}
+
+func TestMarkedEmptyDir(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ ui.askBeforeDelete = false
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, ' ', 0))
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'e', 0))
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.DirExists(t, "test_dir/nested")
+ assert.NoDirExists(t, "test_dir/nested/subnested")
+}
+
+func TestIgnoreDir(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ ui.askBeforeDelete = false
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0)) // into nested
+ assert.Equal(t, 3, ui.table.GetRowCount())
+
+ ui.table.Select(1, 0) // subnested
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'I', 0)) // ignore subnested
+
+ row, _ := ui.table.GetSelection()
+ assert.Equal(t, 2, row) // selection moves to next row
+
+ ui.table.Select(1, 0)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'I', 0)) // unignore subnested
+}
+
+func TestEmptyFile(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ ui.askBeforeDelete = false
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0)) // into nested
+
+ ui.table.Select(2, 0) // file2
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'e', 0))
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.DirExists(t, "test_dir/nested")
+ assert.DirExists(t, "test_dir/nested/subnested")
+}
+
+func TestMarkedEmptyFile(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ ui.askBeforeDelete = false
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0)) // into nested
+
+ ui.table.Select(2, 0) // file2
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, ' ', 0))
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'e', 0))
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.DirExists(t, "test_dir/nested")
+ assert.DirExists(t, "test_dir/nested/subnested")
+}
+
+func TestSortByApparentSize(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, false, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'a', 0))
+
+ assert.True(t, ui.ShowApparentSize)
+}
+
+func TestShowFileCount(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, false, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'c', 0))
+
+ assert.True(t, ui.showItemCount)
+}
+
+func TestShowFileCountBW(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, false, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'c', 0))
+
+ assert.True(t, ui.showItemCount)
+}
+
+func TestShowMtime(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, false, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'm', 0))
+
+ assert.True(t, ui.showMtime)
+}
+
+func TestShowMtimeBW(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, false, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'm', 0))
+
+ assert.True(t, ui.showMtime)
+}
+
+func TestShowRelativeBar(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, false, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+ assert.False(t, ui.ShowRelativeSize)
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'B', 0))
+
+ assert.True(t, ui.ShowRelativeSize)
+}
+
+func TestRescan(t *testing.T) {
+ parentDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "parent",
+ },
+ Files: make([]fs.Item, 0, 1),
+ }
+ currentDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "sub",
+ Parent: parentDir,
+ },
+ }
+
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.currentDir = currentDir
+ ui.topDir = parentDir
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'r', 0))
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+ assert.Equal(t, parentDir, ui.currentDir.GetParent())
+
+ assert.Equal(t, 5, ui.table.GetRowCount())
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "/..")
+ assert.Contains(t, ui.table.GetCell(1, 0).Text, "ccc")
+}
+
+func TestSorting(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ ui.table.Select(1, 0)
+ // mark the item for deletion
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, ' ', 0))
+ assert.Equal(t, 1, len(ui.markedRows))
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 's', 0))
+ assert.Equal(t, "size", ui.sortBy)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'C', 0))
+ assert.Equal(t, "itemCount", ui.sortBy)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'n', 0))
+ assert.Equal(t, "name", ui.sortBy)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'M', 0))
+ assert.Equal(t, "mtime", ui.sortBy)
+
+ // marking should be dropped after sorting
+ assert.Equal(t, 0, len(ui.markedRows))
+}
+
+func TestShowFile(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ ui.table.Select(0, 0)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+ ui.table.Select(2, 0)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'v', 0))
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'q', 0))
+}
+
+func TestShowFileWithNoViewFile(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ ui.SetNoViewFile()
+ ui.table.Select(0, 0)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+ ui.table.Select(2, 0)
+ previousHeaderText := ui.header.GetText(false)
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'v', 0))
+
+ assert.False(t, ui.pages.HasPage("file"))
+ assert.Equal(t, " Viewing files is disabled!", ui.header.GetText(false))
+
+ time.Sleep(2100 * time.Millisecond)
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, previousHeaderText, ui.header.GetText(false))
+}
+
+func TestShowInfoAndMoveAround(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'i', 0))
+
+ assert.True(t, ui.pages.HasPage("info"))
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'k', 0)) // move up
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'j', 0)) // move down
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'k', 0)) // move up
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, '?', 0)) // does nothing
+
+ assert.True(t, ui.pages.HasPage("info")) // we can still see info page
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'q', 0))
+
+ assert.False(t, ui.pages.HasPage("info"))
+}
+
+func TestBlockedActionsInArchive(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+
+ // Simulate being in a zip dir
+ zipDir := &analyze.ZipDir{
+ Dir: &analyze.Dir{
+ File: &analyze.File{
+ Name: "test.zip",
+ Flag: 'Z',
+ },
+ },
+ }
+ ui.currentDir = zipDir
+
+ // Test 'd' (delete)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'd', 0))
+ assert.True(t, ui.pages.HasPage("error"))
+ ui.pages.RemovePage("error")
+
+ // Test 'e' (empty)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'e', 0))
+ assert.True(t, ui.pages.HasPage("error"))
+ ui.pages.RemovePage("error")
+
+ // Test 'v' (view)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'v', 0))
+ assert.True(t, ui.pages.HasPage("error"))
+ ui.pages.RemovePage("error")
+
+ // Test 'b' (shell)
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'b', 0))
+ assert.True(t, ui.pages.HasPage("error"))
+ ui.pages.RemovePage("error")
+}
--- /dev/null
+package tui
+
+import (
+ "strconv"
+
+ "golang.org/x/text/cases"
+ "golang.org/x/text/language"
+
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+func (ui *UI) fileItemMarked(row int) {
+ if _, ok := ui.markedRows[row]; ok {
+ delete(ui.markedRows, row)
+ } else {
+ ui.markedRows[row] = struct{}{}
+ }
+ ui.showDir()
+ // select next row if possible
+ ui.table.Select(min(row+1, ui.table.GetRowCount()-1), 0)
+}
+
+func (ui *UI) deleteMarked(shouldEmpty bool) {
+ var action, acting string
+ if shouldEmpty {
+ action = actionEmpty
+ acting = actingEmpty
+ } else {
+ action = actionDelete
+ acting = actingDelete
+ }
+
+ var currentDir fs.Item
+ var markedItems []fs.Item
+ for row := range ui.markedRows {
+ item := ui.table.GetCell(row, 0).GetReference().(fs.Item)
+ markedItems = append(markedItems, item)
+ }
+
+ if ui.deleteInBackground {
+ ui.queueForDeletion(markedItems, shouldEmpty)
+ return
+ }
+
+ modal := tview.NewModal()
+ ui.pages.AddPage(acting, modal, true, true)
+
+ currentRow, _ := ui.table.GetSelection()
+
+ var deleteFun func(fs.Item, fs.Item) error
+
+ go func() {
+ for _, one := range markedItems {
+ ui.app.QueueUpdateDraw(func() {
+ modal.SetText(
+ cases.Title(language.English).String(acting) +
+ " " +
+ tview.Escape(one.GetName()) +
+ "...",
+ )
+ })
+
+ if shouldEmpty && !one.IsDir() {
+ deleteFun = ui.emptier
+ } else {
+ deleteFun = ui.remover
+ }
+
+ var deleteItems []fs.Item
+ if shouldEmpty && one.IsDir() {
+ currentDir = one
+ for file := range currentDir.GetFiles(fs.SortBySize, fs.SortDesc) {
+ deleteItems = append(deleteItems, file)
+ }
+ } else {
+ currentDir = ui.currentDir
+ deleteItems = append(deleteItems, one)
+ }
+
+ for _, item := range deleteItems {
+ if err := deleteFun(currentDir, item); err != nil {
+ msg := "Can't " + action + " " + tview.Escape(one.GetName())
+ ui.app.QueueUpdateDraw(func() {
+ ui.pages.RemovePage(acting)
+ ui.showErr(msg, err)
+ })
+ if ui.done != nil {
+ ui.done <- struct{}{}
+ }
+ return
+ }
+ }
+ }
+
+ ui.app.QueueUpdateDraw(func() {
+ ui.pages.RemovePage(acting)
+ ui.pages.RemovePage(acting)
+ ui.markedRows = make(map[int]struct{})
+ x, y := ui.table.GetOffset()
+ ui.showDir()
+ ui.table.Select(min(currentRow, ui.table.GetRowCount()-1), 0)
+ ui.table.SetOffset(min(x, ui.table.GetRowCount()-1), y)
+ })
+
+ if ui.done != nil {
+ ui.done <- struct{}{}
+ }
+ }()
+}
+
+func (ui *UI) confirmDeletionMarked(shouldEmpty bool) {
+ var action string
+ if shouldEmpty {
+ action = actionEmpty
+ } else {
+ action = actionDelete
+ }
+
+ modal := tview.NewModal().
+ SetText(
+ "Are you sure you want to " +
+ action + " [::b]" +
+ strconv.Itoa(len(ui.markedRows)) +
+ "[::-] items?",
+ ).
+ AddButtons([]string{"no", "yes", "don't ask me again"}).
+ SetDoneFunc(func(buttonIndex int, buttonLabel string) {
+ switch buttonIndex {
+ case 2:
+ ui.askBeforeDelete = false
+ fallthrough
+ case 1:
+ ui.deleteMarked(shouldEmpty)
+ }
+ ui.pages.RemovePage("confirm")
+ })
+
+ if !ui.UseColors {
+ modal.SetBackgroundColor(tcell.ColorGray)
+ } else {
+ modal.SetBackgroundColor(tcell.ColorBlack)
+ }
+ modal.SetBorderColor(tcell.ColorDefault)
+
+ ui.pages.AddPage("confirm", modal, true, true)
+}
--- /dev/null
+package tui
+
+import (
+ "testing"
+
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestItemMarked(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, false, true, false)
+ ui.done = make(chan struct{})
+
+ ui.fileItemMarked(1)
+ assert.Equal(t, ui.markedRows, map[int]struct{}{1: {}})
+
+ ui.fileItemMarked(1)
+ assert.Equal(t, ui.markedRows, map[int]struct{}{})
+}
--- /dev/null
+package tui
+
+import (
+ "time"
+
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+func (ui *UI) onMouse(event *tcell.EventMouse, action tview.MouseAction) (*tcell.EventMouse, tview.MouseAction) {
+ if event == nil {
+ return nil, action
+ }
+
+ if ui.pages.HasPage("confirm") ||
+ ui.pages.HasPage("progress") ||
+ ui.pages.HasPage("deleting") ||
+ ui.pages.HasPage("emptying") ||
+ ui.pages.HasPage("help") {
+ return nil, action
+ }
+
+ // nolint: exhaustive // Why: we don't need to handle all mouse events
+ switch action {
+ case tview.MouseLeftDoubleClick:
+ row, column := ui.table.GetSelection()
+ if ui.currentDirPath != ui.topDirPath && row == 0 {
+ ui.handleLeft()
+ } else {
+ selectedFile := ui.table.GetCell(row, column).GetReference().(fs.Item)
+ if selectedFile.IsDir() {
+ ui.handleRight()
+ } else {
+ if ui.noViewFile {
+ previousHeaderText := ui.header.GetText(false)
+
+ ui.header.SetText(" Viewing files is disabled!")
+
+ go func() {
+ time.Sleep(2 * time.Second)
+ ui.app.QueueUpdateDraw(func() {
+ ui.header.Clear()
+ ui.header.SetText(previousHeaderText)
+ })
+ }()
+
+ return nil, action
+ }
+ ui.showFile()
+ }
+ }
+ return nil, action
+ case tview.MouseScrollUp, tview.MouseScrollDown:
+ row, column := ui.table.GetSelection()
+ if action == tview.MouseScrollUp && row > 0 {
+ row--
+ } else if action == tview.MouseScrollDown && row+1 < ui.table.GetRowCount() {
+ row++
+ }
+ ui.table.Select(row, column)
+ return nil, action
+ }
+
+ return event, action
+}
--- /dev/null
+package tui
+
+import (
+ "bytes"
+ "testing"
+ "time"
+
+ "github.com/dundee/gdu/v5/internal/testanalyze"
+ "github.com/dundee/gdu/v5/internal/testapp"
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestDoubleClick(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ ui.table.Select(0, 0)
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ ui.onMouse(tcell.NewEventMouse(0, 0, 0, 0), tview.MouseLeftDoubleClick)
+ assert.Equal(t, "nested", ui.currentDir.GetName())
+
+ ui.onMouse(tcell.NewEventMouse(0, 0, 0, 0), tview.MouseLeftDoubleClick)
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ // show file content
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+ ui.table.Select(2, 0)
+ selectedFile := ui.table.GetCell(2, 0).GetReference().(fs.Item)
+ assert.Equal(t, selectedFile.GetName(), "file2")
+ ui.onMouse(tcell.NewEventMouse(0, 0, 0, 0), tview.MouseLeftDoubleClick)
+ assert.True(t, ui.pages.HasPage("file"))
+}
+
+func TestDoubleClickNoViewFile(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRight, 'l', 0))
+ ui.table.Select(2, 0)
+ ui.SetNoViewFile()
+ previousHeaderText := ui.header.GetText(false)
+
+ ui.onMouse(tcell.NewEventMouse(0, 0, 0, 0), tview.MouseLeftDoubleClick)
+ assert.False(t, ui.pages.HasPage("file"))
+ assert.Equal(t, " Viewing files is disabled!", ui.header.GetText(false))
+
+ time.Sleep(2100 * time.Millisecond)
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, previousHeaderText, ui.header.GetText(false))
+}
+
+func TestScroll(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ ui.onMouse(tcell.NewEventMouse(0, 0, 0, 0), tview.MouseScrollDown)
+ row, _ := ui.table.GetSelection()
+ assert.Equal(t, row, 1)
+
+ ui.onMouse(tcell.NewEventMouse(0, 0, 0, 0), tview.MouseScrollUp)
+ row, _ = ui.table.GetSelection()
+ assert.Equal(t, row, 0)
+}
+
+func TestScrollWhenPageOpened(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ // open confirm dialog
+ ui.keyPressed(tcell.NewEventKey(tcell.KeyRune, 'd', 0))
+
+ ui.onMouse(tcell.NewEventMouse(0, 0, 0, 0), tview.MouseScrollDown)
+ row, _ := ui.table.GetSelection()
+ // scrolling does nothing
+ assert.Equal(t, 0, row)
+}
+
+func TestEmptyEvent(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+
+ event, action := ui.onMouse(nil, tview.MouseMove)
+ assert.True(t, event == nil)
+ assert.Equal(t, action, tview.MouseMove)
+}
+
+func TestMouseMove(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+
+ event, action := ui.onMouse(tcell.NewEventMouse(0, 0, 0, 0), tview.MouseMove)
+ assert.True(t, event != nil)
+ assert.Equal(t, action, tview.MouseMove)
+}
--- /dev/null
+package tui
+
+import (
+ "time"
+
+ "github.com/dundee/gdu/v5/internal/common"
+ "github.com/dundee/gdu/v5/pkg/path"
+)
+
+func (ui *UI) updateProgress() {
+ color := "[white:black:b]"
+ if ui.UseColors {
+ color = "[red:black:b]"
+ }
+
+ progressChan := ui.Analyzer.GetProgressChan()
+ doneChan := ui.Analyzer.GetDone()
+
+ var progress common.CurrentProgress
+ start := time.Now()
+
+ for {
+ select {
+ case progress = <-progressChan:
+ case <-doneChan:
+ ui.app.QueueUpdateDraw(func() {
+ ui.progress.SetTitle(" Finalizing... ")
+ ui.progress.SetText("Calculating disk usage...")
+ })
+ return
+ }
+
+ func(itemCount int64, totalSize int64, currentItem string) {
+ delta := time.Since(start).Round(time.Second)
+
+ ui.app.QueueUpdateDraw(func() {
+ ui.progress.SetText("Total items: " +
+ color +
+ common.FormatNumber(int64(itemCount)) +
+ "[white:black:-], size: " +
+ color +
+ ui.formatSize(totalSize, false, false) +
+ "[white:black:-], elapsed time: " +
+ color +
+ delta.String() +
+ "[white:black:-]\nCurrent item: [white:black:b]" +
+ path.ShortenPath(currentItem, ui.currentItemNameMaxLen))
+ })
+ }(progress.ItemCount, progress.TotalSize, progress.CurrentItemName)
+
+ time.Sleep(100 * time.Millisecond)
+ }
+}
--- /dev/null
+package tui
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+ log "github.com/sirupsen/logrus"
+
+ "github.com/dundee/gdu/v5/build"
+ "github.com/dundee/gdu/v5/pkg/fs"
+)
+
+var (
+ helpDisabledSuffix = " (disabled)"
+
+ helpText = ` [::b]up/down, k/j [white:black:-]Move cursor up/down
+ [::b]pgup/pgdn, g/G [white:black:-]Move cursor top/bottom
+ [::b]enter, right, l [white:black:-]Go to directory/device
+ [::b]left, h [white:black:-]Go to parent directory
+
+ [::b]r [white:black:-]Rescan current directory
+ [::b]E [white:black:-]Export analysis data to file as JSON
+ [::b]/ [white:black:-]Search items by name
+ [::b]T [white:black:-]Filter items by file type (extension)
+ [::b]a [white:black:-]Toggle between showing disk usage and apparent size
+ [::b]B [white:black:-]Toggle bar alignment to biggest file or directory
+ [::b]c [white:black:-]Show/hide file count
+ [::b]m [white:black:-]Show/hide latest mtime
+ [::b]b [white:black:-]Spawn shell in current directory
+ [::b]q [white:black:-]Quit gdu
+ [::b]Q [white:black:-]Quit gdu and print current directory path
+
+Item under cursor:
+ [::b]d [white:black:-]Delete file or directory
+ [::b]e [white:black:-]Empty file or directory
+ [::b]space [white:black:-]Mark file or directory for deletion
+ [::b]I [white:black:-]Ignore file or directory
+ [::b]v [white:black:-]Show content of file
+ [::b]o [white:black:-]Open file or directory in external program
+ [::b]i [white:black:-]Show info about item
+
+Sort by (twice toggles asc/desc):
+ [::b]n [white:black:-]Sort by name (asc/desc)
+ [::b]s [white:black:-]Sort by size (asc/desc)
+ [::b]C [white:black:-]Sort by file count (asc/desc)
+ [::b]M [white:black:-]Sort by mtime (asc/desc)`
+)
+
+// nolint: funlen // Why: complex function
+func (ui *UI) showDir() {
+ var (
+ totalUsage int64
+ totalSize int64
+ maxUsage int64
+ maxSize int64
+ itemCount int64
+ )
+
+ ui.currentDirPath = ui.currentDir.GetPath()
+
+ if ui.changeCwdFn != nil {
+ err := ui.changeCwdFn(ui.currentDirPath)
+ if err != nil {
+ log.Printf("error setting cwd: %s", err.Error())
+ }
+ log.Printf("changing cwd to %s", ui.currentDirPath)
+ }
+
+ ui.currentDirLabel.SetText("[::b] --- " +
+ tview.Escape(
+ strings.TrimPrefix(ui.currentDirPath, build.RootPathPrefix),
+ ) +
+ " ---").SetDynamicColors(true)
+
+ ui.table.Clear()
+
+ rowIndex := 0
+ if ui.currentDirPath != ui.topDirPath {
+ prefix := " "
+ if len(ui.markedRows) > 0 {
+ prefix += " "
+ }
+
+ cell := tview.NewTableCell(prefix + "[::b]/..")
+
+ // Use the collapsed parent logic to handle navigation back through collapsed paths
+ var collapsedParent fs.Item
+ if ui.collapsePath {
+ collapsedParent = findCollapsedParent(ui.currentDir)
+ } else {
+ collapsedParent = ui.currentDir.GetParent()
+ }
+ cell.SetReference(collapsedParent)
+ cell.SetStyle(tcell.Style{}.Foreground(tcell.ColorDefault))
+ ui.table.SetCell(0, 0, cell)
+ rowIndex++
+ }
+
+ sortBy, sortOrder := ui.getSortParams()
+
+ unlock := ui.currentDir.RLock()
+ defer unlock()
+
+ i := rowIndex
+ maxUsage = 0
+ maxSize = 0
+ for item := range ui.currentDir.GetFiles(sortBy, sortOrder) {
+ if _, ignored := ui.ignoredRows[i]; ignored {
+ i++
+ continue
+ }
+
+ if ui.ShowRelativeSize {
+ if item.GetUsage() > maxUsage {
+ maxUsage = item.GetUsage()
+ }
+ if item.GetSize() > maxSize {
+ maxSize = item.GetSize()
+ }
+ } else {
+ maxSize += item.GetSize()
+ maxUsage += item.GetUsage()
+ }
+ i++
+ }
+
+ for item := range ui.currentDir.GetFiles(sortBy, sortOrder) {
+ if ui.filterValue != "" && !strings.Contains(
+ strings.ToLower(item.GetName()),
+ strings.ToLower(ui.filterValue),
+ ) {
+ continue
+ }
+
+ if !ui.matchesTypeFilter(item.GetName(), item.IsDir()) {
+ continue
+ }
+
+ _, ignored := ui.ignoredRows[rowIndex]
+
+ if !ignored {
+ totalUsage += item.GetUsage()
+ totalSize += item.GetSize()
+ itemCount += item.GetItemCount()
+ }
+
+ _, marked := ui.markedRows[rowIndex]
+
+ var cell *tview.TableCell
+ var reference fs.Item
+
+ // Check if this directory can be collapsed
+ if item.IsDir() {
+ var collapsedPath *CollapsedPath
+ if ui.collapsePath {
+ collapsedPath = findCollapsiblePath(item)
+ }
+
+ if collapsedPath != nil {
+ // Format as collapsed path
+ cell = tview.NewTableCell(ui.formatCollapsedRow(collapsedPath, maxUsage, maxSize, marked, ignored))
+ // Reference should point to the deepest directory for navigation
+ reference = collapsedPath.DeepestDir
+ } else {
+ // Regular directory formatting
+ cell = tview.NewTableCell(ui.formatFileRow(item, maxUsage, maxSize, marked, ignored))
+ reference = item
+ }
+ } else {
+ // Regular file formatting
+ cell = tview.NewTableCell(ui.formatFileRow(item, maxUsage, maxSize, marked, ignored))
+ reference = item
+ }
+
+ cell.SetReference(reference)
+
+ switch {
+ case ignored:
+ cell.SetStyle(tcell.Style{}.Foreground(tview.Styles.SecondaryTextColor))
+ case marked:
+ cell.SetStyle(tcell.Style{}.Foreground(tview.Styles.PrimaryTextColor))
+ cell.SetBackgroundColor(tview.Styles.ContrastBackgroundColor)
+ default:
+ cell.SetStyle(tcell.Style{}.Foreground(tcell.ColorDefault))
+ }
+
+ ui.table.SetCell(rowIndex, 0, cell)
+ rowIndex++
+ }
+
+ var footerNumberColor, footerTextColor string
+ if ui.UseColors {
+ footerNumberColor = fmt.Sprintf(
+ "[%s:%s:b]",
+ ui.footerNumberColor,
+ ui.footerBackgroundColor,
+ )
+ footerTextColor = fmt.Sprintf(
+ "[%s:%s:-]",
+ ui.footerTextColor,
+ ui.footerBackgroundColor,
+ )
+ } else {
+ footerNumberColor = "[black:white:b]"
+ footerTextColor = blackOnWhite
+ }
+
+ selected := ""
+ if len(ui.markedRows) > 0 {
+ selected = " Selected items: " + footerNumberColor +
+ strconv.Itoa(len(ui.markedRows)) + footerTextColor
+ }
+
+ timeFilterText := ui.formatTimeFilterInfo()
+
+ typeFilterText := ui.formatTypeFilterInfo(footerNumberColor, footerTextColor)
+
+ ui.footerLabel.SetText(
+ selected + footerTextColor +
+ " Total disk usage: " +
+ footerNumberColor +
+ ui.formatSize(totalUsage, true, false) +
+ " Apparent size: " +
+ footerNumberColor +
+ ui.formatSize(totalSize, true, false) +
+ " Items: " + footerNumberColor + fmt.Sprintf("%d", itemCount) +
+ footerTextColor +
+ " Sorting by: " + ui.sortBy + " " + ui.sortOrder +
+ typeFilterText +
+ timeFilterText)
+
+ ui.table.Select(0, 0)
+ ui.table.ScrollToBeginning()
+
+ if !ui.filtering && !ui.typeFiltering {
+ ui.app.SetFocus(ui.table)
+ }
+}
+
+func (ui *UI) showDevices() {
+ var totalUsage int64
+
+ ui.table.Clear()
+ ui.table.SetCell(0, 0, tview.NewTableCell("Device name").SetSelectable(false))
+ ui.table.SetCell(0, 1, tview.NewTableCell("Size").SetSelectable(false))
+ ui.table.SetCell(0, 2, tview.NewTableCell("Used").SetSelectable(false))
+ ui.table.SetCell(0, 3, tview.NewTableCell("Used part").SetSelectable(false))
+ ui.table.SetCell(0, 4, tview.NewTableCell("Free").SetSelectable(false))
+ ui.table.SetCell(0, 5, tview.NewTableCell("Mount point").SetSelectable(false))
+
+ var textColor, sizeColor string
+ if ui.UseColors {
+ textColor = "[#3498db:-:b]"
+ sizeColor = "[#edb20a:-:b]"
+ } else {
+ textColor = "[white:-:b]"
+ sizeColor = "[white:-:b]"
+ }
+
+ ui.sortDevices()
+
+ for i, device := range ui.devices {
+ totalUsage += device.GetUsage()
+ ui.table.SetCell(i+1, 0, tview.NewTableCell(textColor+device.Name).SetReference(ui.devices[i]))
+ ui.table.SetCell(i+1, 1, tview.NewTableCell(ui.formatSize(device.Size, false, true)))
+ ui.table.SetCell(i+1, 2, tview.NewTableCell(sizeColor+ui.formatSize(device.Size-device.Free, false, true)))
+ ui.table.SetCell(i+1, 3, tview.NewTableCell(getDeviceUsagePart(device, ui.useOldSizeBar)))
+ ui.table.SetCell(i+1, 4, tview.NewTableCell(ui.formatSize(device.Free, false, true)))
+ ui.table.SetCell(i+1, 5, tview.NewTableCell(textColor+device.MountPoint).SetReference(ui.devices[i]))
+ }
+
+ var footerNumberColor, footerTextColor string
+ if ui.UseColors {
+ footerNumberColor = fmt.Sprintf(
+ "[%s:%s:b]",
+ ui.footerNumberColor,
+ ui.footerBackgroundColor,
+ )
+ footerTextColor = fmt.Sprintf(
+ "[%s:%s:-]",
+ ui.footerTextColor,
+ ui.footerBackgroundColor,
+ )
+ } else {
+ footerNumberColor = "[black:white:b]"
+ footerTextColor = blackOnWhite
+ }
+
+ ui.footerLabel.SetText(
+ " Total usage: " +
+ footerNumberColor +
+ ui.formatSize(totalUsage, true, false) +
+ footerTextColor +
+ " Sorting by: " + ui.sortBy + " " + ui.sortOrder)
+
+ ui.table.Select(1, 0)
+ ui.table.SetSelectedFunc(ui.deviceItemSelected)
+
+ if ui.topDirPath != "" {
+ for i, device := range ui.devices {
+ if device.MountPoint == ui.topDirPath {
+ ui.table.Select(i+1, 0)
+ break
+ }
+ }
+ }
+}
+
+func (ui *UI) showErr(msg string, err error) {
+ text := msg
+ if err != nil {
+ text += ": " + err.Error()
+ }
+
+ modal := tview.NewModal().
+ SetText(text).
+ AddButtons([]string{"ok"}).
+ SetDoneFunc(func(buttonIndex int, buttonLabel string) {
+ ui.pages.RemovePage("error")
+ })
+
+ if !ui.UseColors {
+ modal.SetBackgroundColor(tcell.ColorGray)
+ }
+
+ ui.pages.AddPage("error", modal, true, true)
+ ui.app.SetFocus(modal)
+}
+
+func (ui *UI) showErrFromGo(msg string, err error) {
+ ui.app.QueueUpdateDraw(func() {
+ ui.showErr(msg, err)
+ })
+}
+
+func (ui *UI) showHelp() {
+ text := tview.NewTextView().SetDynamicColors(true)
+ text.SetBorder(true).SetBorderPadding(2, 2, 2, 2)
+ text.SetBorderColor(tcell.ColorDefault)
+ text.SetTitle(" gdu help ")
+ text.SetScrollable(true)
+
+ formattedHelpText := ui.formatHelpTextFor()
+ text.SetText(formattedHelpText)
+
+ maxHeight := strings.Count(formattedHelpText, "\n") + 7
+ _, height := ui.screen.Size()
+ if height > maxHeight {
+ height = maxHeight
+ }
+
+ flex := tview.NewFlex().
+ AddItem(nil, 0, 1, false).
+ AddItem(tview.NewFlex().SetDirection(tview.FlexRow).
+ AddItem(nil, 0, 1, false).
+ AddItem(text, height, 1, false).
+ AddItem(nil, 0, 1, false), 80, 1, false).
+ AddItem(nil, 0, 1, false)
+
+ ui.help = flex
+ ui.pages.AddPage("help", flex, true, true)
+ ui.app.SetFocus(text)
+}
+
+func (ui *UI) formatHelpTextFor() string {
+ lines := strings.Split(helpText, "\n")
+
+ for i, line := range lines {
+ if ui.UseColors {
+ lines[i] = strings.ReplaceAll(
+ strings.ReplaceAll(line, defaultColorBold, "[red]"),
+ whiteOnBlack,
+ "[white]",
+ )
+ }
+
+ isFound := (strings.Contains(line, "Empty file or directory") ||
+ strings.Contains(line, "Delete file or directory"))
+
+ if ui.noDelete && isFound {
+ lines[i] += helpDisabledSuffix
+ } else if ui.noDeleteWithFilter && isFound {
+ lines[i] += " (disabled/filter)"
+ }
+
+ if ui.noSpawnShell && (strings.Contains(line, "Spawn shell in current directory") ||
+ strings.Contains(line, "Open file or directory in external program")) {
+ lines[i] += helpDisabledSuffix
+ }
+
+ if ui.noViewFile && strings.Contains(line, "Show content of file") {
+ lines[i] += helpDisabledSuffix
+ }
+ }
+
+ return strings.Join(lines, "\n")
+}
+
+func (ui *UI) formatTypeFilterInfo(numberColor, textColor string) string {
+ if ui.typeFilterValue == "" {
+ return ""
+ }
+ return " Type filter: " + numberColor + ui.typeFilterValue + textColor
+}
--- /dev/null
+package tui
+
+import (
+ "bufio"
+ "compress/bzip2"
+ "compress/gzip"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/gdamore/tcell/v2"
+ "github.com/h2non/filetype"
+ "github.com/h2non/filetype/matchers"
+ "github.com/pkg/errors"
+ "github.com/rivo/tview"
+ "github.com/ulikunitz/xz"
+
+ "github.com/dundee/gdu/v5/build"
+ "github.com/dundee/gdu/v5/pkg/fs"
+)
+
+func (ui *UI) showFile() *tview.TextView {
+ if ui.currentDir == nil {
+ return nil
+ }
+
+ row, column := ui.table.GetSelection()
+ cell := ui.table.GetCell(row, column)
+ if cell == nil || cell.GetReference() == nil {
+ return nil
+ }
+
+ selectedFile, ok := cell.GetReference().(fs.Item)
+ if !ok || selectedFile == nil || selectedFile.IsDir() {
+ return nil
+ }
+
+ path := selectedFile.GetPath()
+ f, err := os.Open(path)
+ if err != nil {
+ ui.showErr("Error opening file", err)
+ return nil
+ }
+ scanner, err := getScanner(f)
+ if err != nil {
+ ui.showErr("Error reading file", err)
+ return nil
+ }
+
+ totalLines := 0
+
+ file := tview.NewTextView()
+ ui.currentDirLabel.SetText("[::b] --- " +
+ strings.TrimPrefix(path, build.RootPathPrefix) +
+ " ---").SetDynamicColors(true)
+
+ readNextPart := func(linesCount int) int {
+ var err error
+ readLines := 0
+ for scanner.Scan() && readLines <= linesCount {
+ _, err = file.Write(scanner.Bytes())
+ if err != nil {
+ ui.showErr("Error reading file", err)
+ return 0
+ }
+ _, err = file.Write([]byte("\n"))
+ if err != nil {
+ ui.showErr("Error reading file", err)
+ return 0
+ }
+ readLines++
+ }
+ return readLines
+ }
+ totalLines += readNextPart(defaultLinesCount)
+
+ file.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {
+ if event.Rune() == 'q' || event.Key() == tcell.KeyESC {
+ err = f.Close()
+ if err != nil {
+ ui.showErr("Error closing file", err)
+ return event
+ }
+ ui.currentDirLabel.SetText("[::b] --- " +
+ strings.TrimPrefix(ui.currentDirPath, build.RootPathPrefix) +
+ " ---").SetDynamicColors(true)
+ ui.pages.RemovePage("file")
+ ui.app.SetFocus(ui.table)
+ return event
+ }
+
+ if event.Rune() == 'j' || event.Rune() == 'G' ||
+ event.Key() == tcell.KeyDown || event.Key() == tcell.KeyPgDn {
+ _, _, _, height := file.GetInnerRect()
+ row, _ := file.GetScrollOffset()
+ if height+row > totalLines-linesThreshold {
+ totalLines += readNextPart(defaultLinesCount)
+ }
+ }
+ return event
+ })
+
+ grid := tview.NewGrid().SetRows(1, 1, 0, 1).SetColumns(0)
+ grid.AddItem(ui.header, 0, 0, 1, 1, 0, 0, false).
+ AddItem(ui.currentDirLabel, 1, 0, 1, 1, 0, 0, false).
+ AddItem(file, 2, 0, 1, 1, 0, 0, true).
+ AddItem(ui.footerLabel, 3, 0, 1, 1, 0, 0, false)
+
+ ui.pages.HidePage("background")
+ ui.pages.AddPage("file", grid, true, true)
+
+ return file
+}
+
+func getScanner(f io.ReadSeeker) (scanner *bufio.Scanner, err error) {
+ // We only have to pass the file header = first 261 bytes
+ head := make([]byte, 261)
+ if _, err = f.Read(head); err != nil {
+ return nil, errors.Wrap(err, "error reading file header")
+ }
+
+ if pos, err := f.Seek(0, 0); pos != 0 || err != nil {
+ return nil, errors.Wrap(err, "error seeking file")
+ }
+ scanner = bufio.NewScanner(f)
+
+ typ, err := filetype.Match(head)
+ if err != nil {
+ return nil, errors.Wrap(err, "error matching file type")
+ }
+
+ switch typ.MIME.Value {
+ case matchers.TypeGz.MIME.Value:
+ r, err := gzip.NewReader(f)
+ if err != nil {
+ return nil, errors.Wrap(err, "error creating gzip reader")
+ }
+ scanner = bufio.NewScanner(r)
+ case matchers.TypeBz2.MIME.Value:
+ r := bzip2.NewReader(f)
+ scanner = bufio.NewScanner(r)
+ case matchers.TypeXz.MIME.Value:
+ r, err := xz.NewReader(f)
+ if err != nil {
+ return nil, errors.Wrap(err, "error creating xz reader")
+ }
+ scanner = bufio.NewScanner(r)
+ }
+
+ return scanner, nil
+}
--- /dev/null
+package tui
+
+import (
+ "bytes"
+ "compress/gzip"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/ulikunitz/xz"
+)
+
+func TestGetScannerForEmptyString(t *testing.T) {
+ r := bytes.NewReader([]byte{})
+ _, err := getScanner(r)
+ assert.ErrorContains(t, err, "EOF")
+}
+
+func TestGetScannerForPlainString(t *testing.T) {
+ r := bytes.NewReader([]byte("hello"))
+ s, err := getScanner(r)
+ assert.Nil(t, err)
+
+ assert.Equal(t, true, s.Scan())
+ assert.Equal(t, "hello", s.Text())
+ assert.Equal(t, nil, s.Err())
+}
+
+func TestGetScannerForGzipped(t *testing.T) {
+ b := bytes.NewBuffer([]byte{})
+ w := gzip.NewWriter(b)
+
+ _, err := w.Write([]byte("hello world"))
+ assert.Nil(t, err)
+
+ err = w.Close()
+ assert.Nil(t, err)
+
+ r := bytes.NewReader(b.Bytes())
+ s, err := getScanner(r)
+ assert.Nil(t, err)
+
+ assert.Equal(t, true, s.Scan())
+ assert.Equal(t, "hello world", s.Text())
+ assert.Equal(t, nil, s.Err())
+}
+
+func TestGetScannerForBzipped(t *testing.T) {
+ r := bytes.NewReader([]byte{
+ // bzip2 header
+ 0x42, 0x5A, 0x68, 0x39,
+ // bzip2 compressed data: "hello"
+ 0x31, 0x41, 0x59, 0x26,
+ 0x53, 0x59, 0xC1, 0xC0,
+ 0x80, 0xE2, 0x00, 0x00,
+ 0x01, 0x41, 0x00, 0x00,
+ 0x10, 0x02, 0x44, 0xA0,
+ 0x00, 0x30, 0xCD, 0x00,
+ 0xC3, 0x46, 0x29, 0x97,
+ 0x17, 0x72, 0x45, 0x38,
+ 0x50, 0x90, 0xC1, 0xC0,
+ 0x80, 0xE2,
+ })
+ s, err := getScanner(r)
+ assert.Nil(t, err)
+
+ assert.Equal(t, true, s.Scan())
+ assert.Equal(t, "hello", s.Text())
+ assert.Equal(t, nil, s.Err())
+}
+
+func TestGetScannerForXzipped(t *testing.T) {
+ b := bytes.NewBuffer([]byte{})
+ w, err := xz.NewWriter(b)
+ assert.Nil(t, err)
+
+ _, err = w.Write([]byte("hello world"))
+ assert.Nil(t, err)
+
+ err = w.Close()
+ assert.Nil(t, err)
+
+ r := bytes.NewReader(b.Bytes())
+ s, err := getScanner(r)
+ assert.Nil(t, err)
+
+ assert.Equal(t, true, s.Scan())
+ assert.Equal(t, "hello world", s.Text())
+ assert.Equal(t, nil, s.Err())
+}
--- /dev/null
+package tui
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+
+ "github.com/dundee/gdu/v5/internal/testapp"
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestHelpNoSpawnShell(t *testing.T) {
+ app, simScreen := testapp.CreateTestAppWithSimScreen(50, 50)
+ defer simScreen.Fini()
+
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.SetNoDelete()
+ ui.SetNoSpawnShell()
+ ui.SetNoViewFile()
+ ui.showHelp()
+
+ assert.True(t, ui.pages.HasPage("help"))
+
+ helpText := ui.formatHelpTextFor()
+
+ assert.True(t, strings.Contains(helpText, "Delete file or directory (disabled)"))
+ assert.True(t, strings.Contains(helpText, "Empty file or directory (disabled)"))
+ assert.True(t, strings.Contains(helpText, "Spawn shell in current directory (disabled)"))
+ assert.True(t, strings.Contains(helpText, "Open file or directory in external program (disabled)"))
+ assert.True(t, strings.Contains(helpText, "Show content of file (disabled)"))
+}
+
+func TestCollapsePathFlag(t *testing.T) {
+ app := testapp.CreateMockedApp(true)
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, false, false, false)
+
+ // Create a collapsible structure
+ deepestDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "deepest",
+ Usage: 100,
+ Size: 100,
+ },
+ Files: []fs.Item{},
+ }
+ middleDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "middle",
+ Usage: 100,
+ Size: 100,
+ },
+ Files: []fs.Item{deepestDir},
+ }
+ topDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "top",
+ },
+ Files: []fs.Item{middleDir},
+ }
+ deepestDir.SetParent(middleDir)
+ middleDir.SetParent(topDir)
+
+ ui.currentDir = topDir
+ ui.topDir = topDir
+ ui.topDirPath = "top"
+
+ // Default (flag false) -> Should NOT collapse
+ ui.showDir()
+ cell := ui.table.GetCell(0, 0)
+ assert.Contains(t, cell.Text, "middle")
+ assert.NotContains(t, cell.Text, "deepest")
+
+ // Enable flag -> Should collapse
+ ui.SetCollapsePath(true)
+ ui.showDir()
+ cell = ui.table.GetCell(0, 0)
+ assert.Contains(t, cell.Text, "middle/deepest")
+}
--- /dev/null
+package tui
+
+import (
+ "sort"
+
+ "github.com/dundee/gdu/v5/pkg/device"
+ "github.com/dundee/gdu/v5/pkg/fs"
+)
+
+const (
+ nameSortKey = "name"
+ sizeSortKey = "size"
+ itemCountSortKey = "itemCount"
+ mtimeSortKey = "mtime"
+
+ ascOrder = "asc"
+ descOrder = "desc"
+)
+
+// SetDefaultSorting sets the default sorting
+func (ui *UI) SetDefaultSorting(by, order string) {
+ if by != "" {
+ ui.defaultSortBy = by
+ }
+ if order == ascOrder || order == descOrder {
+ ui.defaultSortOrder = order
+ }
+}
+
+func (ui *UI) setSorting(newOrder string) {
+ ui.markedRows = make(map[int]struct{})
+
+ if newOrder == ui.sortBy {
+ if ui.sortOrder == ascOrder {
+ ui.sortOrder = descOrder
+ } else {
+ ui.sortOrder = ascOrder
+ }
+ } else {
+ ui.sortBy = newOrder
+ ui.sortOrder = ascOrder
+ }
+
+ if ui.currentDir != nil {
+ ui.showDir()
+ } else if ui.devices != nil && (newOrder == sizeSortKey || newOrder == nameSortKey) {
+ ui.showDevices()
+ }
+}
+
+// getSortParams returns the current sort parameters as fs.SortBy and fs.SortOrder
+func (ui *UI) getSortParams() (fs.SortBy, fs.SortOrder) {
+ var sortBy fs.SortBy
+ switch ui.sortBy {
+ case nameSortKey:
+ sortBy = fs.SortByName
+ case itemCountSortKey:
+ sortBy = fs.SortByItemCount
+ case mtimeSortKey:
+ sortBy = fs.SortByMtime
+ case sizeSortKey:
+ if ui.ShowApparentSize {
+ sortBy = fs.SortByApparentSize
+ } else {
+ sortBy = fs.SortBySize
+ }
+ default:
+ sortBy = fs.SortBySize
+ }
+
+ sortOrder := fs.SortAsc
+ if ui.sortOrder == descOrder {
+ sortOrder = fs.SortDesc
+ }
+
+ return sortBy, sortOrder
+}
+
+func (ui *UI) sortDevices() {
+ if ui.sortBy == sizeSortKey {
+ if ui.sortOrder == descOrder {
+ sort.Sort(sort.Reverse(device.ByUsedSize(ui.devices)))
+ } else {
+ sort.Sort(device.ByUsedSize(ui.devices))
+ }
+ }
+ if ui.sortBy == nameSortKey {
+ if ui.sortOrder == descOrder {
+ sort.Sort(sort.Reverse(device.ByName(ui.devices)))
+ } else {
+ sort.Sort(device.ByName(ui.devices))
+ }
+ }
+}
--- /dev/null
+package tui
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/dundee/gdu/v5/internal/testanalyze"
+ "github.com/dundee/gdu/v5/internal/testapp"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestAnalyzeByApparentSize(t *testing.T) {
+ ui := getAnalyzedPathWithSorting("size", "desc", true)
+
+ assert.Equal(t, 4, ui.table.GetRowCount())
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "ccc")
+ assert.Contains(t, ui.table.GetCell(1, 0).Text, "bbb")
+ assert.Contains(t, ui.table.GetCell(2, 0).Text, "aaa")
+ assert.Contains(t, ui.table.GetCell(3, 0).Text, "ddd")
+}
+
+func TestSortByApparentSizeAsc(t *testing.T) {
+ ui := getAnalyzedPathWithSorting("size", "asc", true)
+
+ assert.Equal(t, 4, ui.table.GetRowCount())
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "ddd")
+ assert.Contains(t, ui.table.GetCell(1, 0).Text, "aaa")
+ assert.Contains(t, ui.table.GetCell(2, 0).Text, "bbb")
+ assert.Contains(t, ui.table.GetCell(3, 0).Text, "ccc")
+}
+
+func TestAnalyzeBySize(t *testing.T) {
+ ui := getAnalyzedPathWithSorting("size", "desc", false)
+
+ assert.Equal(t, 4, ui.table.GetRowCount())
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "ccc")
+ assert.Contains(t, ui.table.GetCell(1, 0).Text, "bbb")
+ assert.Contains(t, ui.table.GetCell(2, 0).Text, "aaa")
+ assert.Contains(t, ui.table.GetCell(3, 0).Text, "ddd")
+}
+
+func TestSortBySizeAsc(t *testing.T) {
+ ui := getAnalyzedPathWithSorting("size", "asc", false)
+
+ assert.Equal(t, 4, ui.table.GetRowCount())
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "ddd")
+ assert.Contains(t, ui.table.GetCell(1, 0).Text, "aaa")
+ assert.Contains(t, ui.table.GetCell(2, 0).Text, "bbb")
+ assert.Contains(t, ui.table.GetCell(3, 0).Text, "ccc")
+}
+
+func TestAnalyzeByName(t *testing.T) {
+ ui := getAnalyzedPathWithSorting("name", "desc", false)
+
+ assert.Equal(t, 4, ui.table.GetRowCount())
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "ddd")
+ assert.Contains(t, ui.table.GetCell(1, 0).Text, "ccc")
+ assert.Contains(t, ui.table.GetCell(2, 0).Text, "bbb")
+ assert.Contains(t, ui.table.GetCell(3, 0).Text, "aaa")
+}
+
+func TestAnalyzeByNameAsc(t *testing.T) {
+ ui := getAnalyzedPathWithSorting("name", "asc", false)
+
+ assert.Equal(t, 4, ui.table.GetRowCount())
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "aaa")
+ assert.Contains(t, ui.table.GetCell(1, 0).Text, "bbb")
+ assert.Contains(t, ui.table.GetCell(2, 0).Text, "ccc")
+ assert.Contains(t, ui.table.GetCell(3, 0).Text, "ddd")
+}
+
+func TestAnalyzeByItemCount(t *testing.T) {
+ ui := getAnalyzedPathWithSorting("itemCount", "desc", false)
+
+ assert.Equal(t, 4, ui.table.GetRowCount())
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "ddd")
+ assert.Contains(t, ui.table.GetCell(1, 0).Text, "ccc")
+ assert.Contains(t, ui.table.GetCell(2, 0).Text, "bbb")
+ assert.Contains(t, ui.table.GetCell(3, 0).Text, "aaa")
+}
+
+func TestAnalyzeByItemCountAsc(t *testing.T) {
+ ui := getAnalyzedPathWithSorting("itemCount", "asc", false)
+
+ assert.Equal(t, 4, ui.table.GetRowCount())
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "aaa")
+ assert.Contains(t, ui.table.GetCell(1, 0).Text, "bbb")
+ assert.Contains(t, ui.table.GetCell(2, 0).Text, "ccc")
+ assert.Contains(t, ui.table.GetCell(3, 0).Text, "ddd")
+}
+
+func TestAnalyzeByMtime(t *testing.T) {
+ ui := getAnalyzedPathWithSorting("mtime", "desc", false)
+
+ assert.Equal(t, 4, ui.table.GetRowCount())
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "aaa")
+ assert.Contains(t, ui.table.GetCell(1, 0).Text, "bbb")
+ assert.Contains(t, ui.table.GetCell(2, 0).Text, "ccc")
+ assert.Contains(t, ui.table.GetCell(3, 0).Text, "ddd")
+}
+
+func TestAnalyzeByMtimeAsc(t *testing.T) {
+ ui := getAnalyzedPathWithSorting("mtime", "asc", false)
+
+ assert.Equal(t, 4, ui.table.GetRowCount())
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "ddd")
+ assert.Contains(t, ui.table.GetCell(1, 0).Text, "ccc")
+ assert.Contains(t, ui.table.GetCell(2, 0).Text, "bbb")
+ assert.Contains(t, ui.table.GetCell(3, 0).Text, "aaa")
+}
+
+func TestSetSorting(t *testing.T) {
+ ui := getAnalyzedPathWithSorting("itemCount", "asc", false)
+
+ ui.setSorting("name")
+ assert.Equal(t, "name", ui.sortBy)
+ assert.Equal(t, "asc", ui.sortOrder)
+ ui.setSorting("name")
+ assert.Equal(t, "name", ui.sortBy)
+ assert.Equal(t, "desc", ui.sortOrder)
+ ui.setSorting("name")
+ assert.Equal(t, "name", ui.sortBy)
+ assert.Equal(t, "asc", ui.sortOrder)
+}
+
+func TestSetDEfaultSorting(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ var opts []Option
+ opts = append(opts, func(ui *UI) {
+ ui.SetDefaultSorting("name", "asc")
+ })
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, false, false, false, opts...)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+
+ if err := ui.AnalyzePath("test_dir", nil); err != nil {
+ panic(err)
+ }
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "name", ui.sortBy)
+ assert.Equal(t, "asc", ui.sortOrder)
+}
+
+func TestSortDevicesByName(t *testing.T) {
+ app, simScreen := testapp.CreateTestAppWithSimScreen(50, 50)
+ defer simScreen.Fini()
+
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, true, false)
+ err := ui.ListDevices(getDevicesInfoMock())
+
+ assert.Nil(t, err)
+
+ ui.setSorting("name") // sort by name asc
+ assert.Equal(t, "/dev/boot", ui.devices[0].Name)
+
+ ui.setSorting("name") // sort by name desc
+ assert.Equal(t, "/dev/root", ui.devices[0].Name)
+}
+
+func TestSortDevicesByUsedSize(t *testing.T) {
+ app, simScreen := testapp.CreateTestAppWithSimScreen(50, 50)
+ defer simScreen.Fini()
+
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, true, false)
+ err := ui.ListDevices(getDevicesInfoMock())
+
+ assert.Nil(t, err)
+
+ ui.setSorting("size") // sort by used size asc
+ assert.Equal(t, "/dev/boot", ui.devices[0].Name)
+
+ ui.setSorting("size") // sort by used size desc
+ assert.Equal(t, "/dev/root", ui.devices[0].Name)
+}
+
+func getAnalyzedPathWithSorting(sortBy string, sortOrder string, apparentSize bool) *UI {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, apparentSize, false, false)
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.done = make(chan struct{})
+ ui.sortBy = sortBy
+ ui.sortOrder = sortOrder
+ if err := ui.AnalyzePath("test_dir", nil); err != nil {
+ panic(err)
+ }
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ return ui
+}
--- /dev/null
+package tui
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+func (ui *UI) toggleStatusBar(show bool) {
+ var textColor, textBgColor tcell.Color
+ if ui.UseColors {
+ textColor = tcell.NewRGBColor(0, 0, 0)
+ textBgColor = tcell.NewRGBColor(36, 121, 208)
+ } else {
+ textColor = tcell.NewRGBColor(0, 0, 0)
+ textBgColor = tcell.NewRGBColor(255, 255, 255)
+ }
+
+ ui.grid.Clear()
+
+ ui.statusMut.Lock()
+ defer ui.statusMut.Unlock()
+
+ if show {
+ ui.status = tview.NewTextView().SetDynamicColors(true)
+ ui.status.SetTextColor(textColor)
+ ui.status.SetBackgroundColor(textBgColor)
+
+ ui.grid.SetRows(1, 1, 0, 1, 1)
+ ui.grid.AddItem(ui.header, 0, 0, 1, 1, 0, 0, false).
+ AddItem(ui.currentDirLabel, 1, 0, 1, 1, 0, 0, false).
+ AddItem(ui.table, 2, 0, 1, 1, 0, 0, true).
+ AddItem(ui.status, 3, 0, 1, 1, 0, 0, false).
+ AddItem(ui.footer, 4, 0, 1, 1, 0, 0, false)
+ return
+ }
+ ui.status = nil
+ ui.grid.SetRows(1, 1, 0, 1)
+ ui.grid.AddItem(ui.header, 0, 0, 1, 1, 0, 0, false).
+ AddItem(ui.currentDirLabel, 1, 0, 1, 1, 0, 0, false).
+ AddItem(ui.table, 2, 0, 1, 1, 0, 0, true).
+ AddItem(ui.footer, 3, 0, 1, 1, 0, 0, false)
+}
+
+func (ui *UI) updateStatusWorker() {
+ for {
+ ui.updateStatus()
+ time.Sleep(500 * time.Millisecond)
+ }
+}
+
+func (ui *UI) updateStatus() {
+ ui.workersMut.Lock()
+ cnt := ui.activeWorkers
+ ui.workersMut.Unlock()
+
+ ui.statusMut.RLock()
+ status := ui.status
+ ui.statusMut.RUnlock()
+
+ if cnt == 0 && status == nil {
+ return
+ }
+
+ if cnt > 0 && status == nil {
+ ui.app.QueueUpdateDraw(func() {
+ ui.toggleStatusBar(true)
+ })
+ } else if cnt == 0 {
+ ui.app.QueueUpdateDraw(func() {
+ ui.toggleStatusBar(false)
+ })
+ return
+ }
+
+ ui.app.QueueUpdateDraw(func() {
+ msg := fmt.Sprintf(" Active background deletions: %d", cnt)
+ ui.statusMut.RLock()
+ ui.status.SetText(msg)
+ ui.statusMut.RUnlock()
+ })
+}
--- /dev/null
+package tui
+
+import (
+ "io"
+ "os"
+ "os/signal"
+ "runtime"
+ "sync"
+ "syscall"
+ "time"
+
+ log "github.com/sirupsen/logrus"
+
+ "github.com/dundee/gdu/v5/internal/common"
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/dundee/gdu/v5/pkg/device"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/dundee/gdu/v5/pkg/remove"
+ "github.com/dundee/gdu/v5/pkg/timefilter"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+// UI struct
+type UI struct {
+ app common.TermApplication
+ screen tcell.Screen
+ output io.Writer
+ currentDir fs.Item
+ topDir fs.Item
+ getter device.DevicesInfoGetter
+ *common.UI
+ grid *tview.Grid
+ header *tview.TextView
+ footer *tview.Flex
+ footerLabel *tview.TextView
+ currentDirLabel *tview.TextView
+ pages *tview.Pages
+ progress *tview.TextView
+ status *tview.TextView
+ help *tview.Flex
+ table *tview.Table
+ filteringInput *tview.InputField
+ typeFilteringInput *tview.InputField
+ done chan struct{}
+ remover func(fs.Item, fs.Item) error
+ emptier func(fs.Item, fs.Item) error
+ exec func(argv0 string, argv []string, envv []string) error
+ changeCwdFn func(string) error
+ linkedItems fs.HardLinkedItems
+ ignoredRows map[int]struct{}
+ markedRows map[int]struct{}
+ deleteQueue chan deleteQueueItem
+ resultRow ResultRow
+ topDirPath string
+ currentDirPath string
+ filterValue string
+ typeFilterValue string
+ sortBy string
+ sortOrder string
+ footerTextColor string
+ footerBackgroundColor string
+ footerNumberColor string
+ headerTextColor string
+ headerBackgroundColor string
+ defaultSortBy string
+ defaultSortOrder string
+ exportName string
+ devices []*device.Device
+ selectedTextColor tcell.Color
+ selectedBackgroundColor tcell.Color
+ currentItemNameMaxLen int
+ activeWorkers int
+ deleteWorkersCount int
+ statusMut sync.RWMutex
+ workersMut sync.Mutex
+ askBeforeDelete bool
+ showItemCount bool
+ showMtime bool
+ filtering bool
+ typeFiltering bool
+ headerHidden bool
+ useOldSizeBar bool
+ noDelete bool
+ noViewFile bool
+ noSpawnShell bool
+ deleteInBackground bool
+ timeFilter *timefilter.TimeFilter
+ timeFilterLoc *time.Location
+ noDeleteWithFilter bool
+ collapsePath bool
+ browseParentDirs bool
+}
+
+type deleteQueueItem struct {
+ item fs.Item
+ shouldEmpty bool
+}
+
+// ResultRow is a struct for a row in the result table
+type ResultRow struct {
+ NumberColor string
+ DirectoryColor string
+}
+
+// Option is optional function customizing the behaviour of UI
+type Option func(ui *UI)
+
+// CreateUI creates the whole UI app
+func CreateUI(
+ app common.TermApplication,
+ screen tcell.Screen,
+ output io.Writer,
+ useColors bool,
+ showApparentSize bool,
+ showRelativeSize bool,
+ useSIPrefix bool,
+ opts ...Option,
+) *UI {
+ ui := &UI{
+ UI: &common.UI{
+ UseColors: useColors,
+ ShowApparentSize: showApparentSize,
+ ShowRelativeSize: showRelativeSize,
+ Analyzer: analyze.CreateAnalyzer(),
+ UseSIPrefix: useSIPrefix,
+ },
+ app: app,
+ screen: screen,
+ output: output,
+ askBeforeDelete: true,
+ showItemCount: false,
+ remover: remove.ItemFromDir,
+ emptier: remove.EmptyFileFromDir,
+ exec: Execute,
+ linkedItems: make(fs.HardLinkedItems, 10),
+ selectedTextColor: tview.Styles.TitleColor,
+ selectedBackgroundColor: tview.Styles.MoreContrastBackgroundColor,
+ currentItemNameMaxLen: 70,
+ defaultSortBy: "size",
+ defaultSortOrder: "desc",
+ ignoredRows: make(map[int]struct{}),
+ markedRows: make(map[int]struct{}),
+ exportName: "export.json",
+ noDelete: false,
+ noViewFile: false,
+ noSpawnShell: false,
+ deleteQueue: make(chan deleteQueueItem, 1000),
+ deleteWorkersCount: 3 * runtime.GOMAXPROCS(0),
+ }
+ for _, o := range opts {
+ o(ui)
+ }
+
+ ui.resetSorting()
+
+ app.SetBeforeDrawFunc(func(screen tcell.Screen) bool {
+ screen.Clear()
+ return false
+ })
+
+ ui.app.SetInputCapture(ui.keyPressed)
+ ui.app.SetMouseCapture(ui.onMouse)
+
+ ui.header = tview.NewTextView()
+ ui.header.SetText(" gdu ~ Use arrow keys to navigate, press ? for help ")
+ ui.header.SetTextColor(tcell.GetColor(ui.headerTextColor))
+ ui.header.SetBackgroundColor(tcell.GetColor(ui.headerBackgroundColor))
+
+ ui.currentDirLabel = tview.NewTextView()
+ ui.currentDirLabel.SetTextColor(tcell.ColorDefault)
+ ui.currentDirLabel.SetBackgroundColor(tcell.ColorDefault)
+
+ ui.table = tview.NewTable().SetSelectable(true, false)
+ ui.table.SetBackgroundColor(tcell.ColorDefault)
+ ui.table.SetSelectedFunc(ui.fileItemSelected)
+
+ if ui.UseColors {
+ ui.table.SetSelectedStyle(tcell.Style{}.
+ Foreground(ui.selectedTextColor).
+ Background(ui.selectedBackgroundColor).Bold(true))
+ } else {
+ ui.table.SetSelectedStyle(tcell.Style{}.
+ Foreground(tcell.ColorWhite).
+ Background(tcell.ColorGray).Bold(true))
+ }
+
+ ui.footerLabel = tview.NewTextView().SetDynamicColors(true)
+ ui.footerLabel.SetTextColor(tcell.GetColor(ui.footerTextColor))
+ ui.footerLabel.SetBackgroundColor(tcell.GetColor(ui.footerBackgroundColor))
+ ui.footerLabel.SetText(" No items to display. ")
+
+ ui.footer = tview.NewFlex()
+ ui.footer.AddItem(ui.footerLabel, 0, 1, false)
+
+ ui.createGrid()
+
+ ui.pages = tview.NewPages().
+ AddPage("background", ui.grid, true, true)
+ ui.pages.SetBackgroundColor(tcell.ColorDefault)
+
+ ui.app.SetRoot(ui.pages, true)
+
+ return ui
+}
+
+// createGrid creates the main grid layout
+func (ui *UI) createGrid() {
+ if ui.headerHidden {
+ ui.grid = tview.NewGrid().SetRows(1, 0, 1).SetColumns(0)
+ ui.grid.AddItem(ui.currentDirLabel, 0, 0, 1, 1, 0, 0, false).
+ AddItem(ui.table, 1, 0, 1, 1, 0, 0, true).
+ AddItem(ui.footer, 2, 0, 1, 1, 0, 0, false)
+ } else {
+ ui.grid = tview.NewGrid().SetRows(1, 1, 0, 1).SetColumns(0)
+ ui.grid.AddItem(ui.header, 0, 0, 1, 1, 0, 0, false).
+ AddItem(ui.currentDirLabel, 1, 0, 1, 1, 0, 0, false).
+ AddItem(ui.table, 2, 0, 1, 1, 0, 0, true).
+ AddItem(ui.footer, 3, 0, 1, 1, 0, 0, false)
+ }
+}
+
+// SetSelectedTextColor sets the color for the highlighted selected text
+func (ui *UI) SetSelectedTextColor(color tcell.Color) {
+ ui.selectedTextColor = color
+}
+
+// SetSelectedBackgroundColor sets the color for the highlighted selected text
+func (ui *UI) SetSelectedBackgroundColor(color tcell.Color) {
+ ui.selectedBackgroundColor = color
+}
+
+// SetFooterTextColor sets the color for the footer text
+func (ui *UI) SetFooterTextColor(color string) {
+ ui.footerTextColor = color
+}
+
+// SetFooterBackgroundColor sets the color for the footer background
+func (ui *UI) SetFooterBackgroundColor(color string) {
+ ui.footerBackgroundColor = color
+}
+
+// SetFooterNumberColor sets the color for the footer number
+func (ui *UI) SetFooterNumberColor(color string) {
+ ui.footerNumberColor = color
+}
+
+// SetHeaderTextColor sets the color for the header text
+func (ui *UI) SetHeaderTextColor(color string) {
+ ui.headerTextColor = color
+}
+
+// SetHeaderBackgroundColor sets the color for the header background
+func (ui *UI) SetHeaderBackgroundColor(color string) {
+ ui.headerBackgroundColor = color
+}
+
+// SetHeaderHidden sets the flag to hide the header
+func (ui *UI) SetHeaderHidden() {
+ ui.headerHidden = true
+}
+
+// SetResultRowDirectoryColor sets the color for the result row directory
+func (ui *UI) SetResultRowDirectoryColor(color string) {
+ ui.resultRow.DirectoryColor = color
+}
+
+// SetResultRowNumberColor sets the color for the result row number
+func (ui *UI) SetResultRowNumberColor(color string) {
+ ui.resultRow.NumberColor = color
+}
+
+// SetCurrentItemNameMaxLen sets the maximum length of the path of the currently processed item
+// to be shown in the progress modal
+func (ui *UI) SetCurrentItemNameMaxLen(maxLen int) {
+ ui.currentItemNameMaxLen = maxLen
+}
+
+// UseOldSizeBar uses the old size bar (# chars) instead of the new one (unicode block elements)
+func (ui *UI) UseOldSizeBar() {
+ ui.useOldSizeBar = true
+}
+
+// SetChangeCwdFn sets function that can be used to change current working dir
+// during dir browsing
+func (ui *UI) SetChangeCwdFn(fn func(string) error) {
+ ui.changeCwdFn = fn
+}
+
+// SetDeleteInParallel sets the flag to delete files in parallel
+func (ui *UI) SetDeleteInParallel() {
+ ui.remover = remove.ItemFromDirParallel
+}
+
+// StartUILoop starts tview application
+func (ui *UI) StartUILoop() error {
+ go func() {
+ c := make(chan os.Signal, 1)
+ signal.Notify(
+ c,
+ syscall.SIGHUP,
+ syscall.SIGINT,
+ syscall.SIGQUIT,
+ syscall.SIGILL,
+ syscall.SIGTRAP,
+ syscall.SIGABRT,
+ syscall.SIGPIPE,
+ syscall.SIGTERM,
+ )
+ s := <-c
+ log.Printf("Got signal: %s", s)
+ ui.app.QueueUpdateDraw(func() {
+ ui.app.Stop()
+ })
+ }()
+
+ return ui.app.Run()
+}
+
+// SetShowItemCount sets the flag to show number of items in directory
+func (ui *UI) SetShowItemCount() {
+ ui.showItemCount = true
+}
+
+// SetShowMTime sets the flag to show last modification time of items in directory
+func (ui *UI) SetShowMTime() {
+ ui.showMtime = true
+}
+
+// SetNoDelete disables all write operations
+func (ui *UI) SetNoDelete() {
+ ui.noDelete = true
+}
+
+// SetNoSpawnShell disables shell spawning
+func (ui *UI) SetNoSpawnShell() {
+ ui.noSpawnShell = true
+}
+
+func (ui *UI) SetNoViewFile() {
+ ui.noViewFile = true
+}
+
+// SetNoDelete disables delete when time filters are active
+func (ui *UI) SetNoDeleteWithFilter() {
+ ui.noDeleteWithFilter = true
+}
+
+// SetBrowseParentDirs enables navigating above the launch directory
+func (ui *UI) SetBrowseParentDirs() {
+ ui.browseParentDirs = true
+}
+
+// SetCollapsePath sets the flag to collapse paths
+func (ui *UI) SetCollapsePath(value bool) {
+ ui.collapsePath = value
+}
+
+// SetDeleteInBackground sets the flag to delete files in background
+func (ui *UI) SetDeleteInBackground() {
+ ui.deleteInBackground = true
+
+ for i := 0; i < ui.deleteWorkersCount; i++ {
+ go ui.deleteWorker()
+ }
+ go ui.updateStatusWorker()
+}
+
+func (ui *UI) resetSorting() {
+ ui.sortBy = ui.defaultSortBy
+ ui.sortOrder = ui.defaultSortOrder
+}
+
+func (ui *UI) rescanDir() {
+ ui.Analyzer.ResetProgress()
+ ui.linkedItems = make(fs.HardLinkedItems)
+ err := ui.AnalyzePath(ui.currentDirPath, ui.currentDir.GetParent())
+ if err != nil {
+ ui.showErr("Error rescanning path", err)
+ }
+}
+
+func (ui *UI) fileItemSelected(row, column int) {
+ if ui.currentDir == nil {
+ return // Add this check to handle nil case
+ }
+
+ selectedDirCell := ui.table.GetCell(row, column)
+
+ // Check if the selectedDirCell is nil before using it
+ if selectedDirCell == nil || selectedDirCell.GetReference() == nil {
+ return
+ }
+
+ selectedDir := selectedDirCell.GetReference().(fs.Item)
+ if selectedDir == nil || !selectedDir.IsDir() {
+ return
+ }
+
+ origDir := ui.currentDir
+ ui.currentDir = selectedDir
+ ui.hideFilterInput()
+ ui.hideTypeFilterInput()
+ ui.markedRows = make(map[int]struct{})
+ ui.ignoredRows = make(map[int]struct{})
+ ui.showDir()
+
+ if row != 0 || origDir.GetPath() == ui.topDir.GetPath() {
+ return
+ }
+
+ // we are going up in the directory tree, select the last visited directory
+ if origDir.GetParent() != nil {
+ nestedDir := origDir
+ for nestedDir.GetParent() != nil {
+ if selectedDir.GetName() == nestedDir.GetParent().GetName() {
+ sortBy, sortOrder := ui.getSortParams()
+ index := -1
+ i := 0
+ for item := range ui.currentDir.GetFiles(sortBy, sortOrder) {
+ if item.GetName() == nestedDir.GetName() {
+ index = i
+ break
+ }
+ i++
+ }
+ if index >= 0 {
+ if ui.currentDir.GetPath() != ui.topDir.GetPath() {
+ index++
+ }
+ ui.table.Select(index, 0)
+ }
+ break
+ }
+ nestedDir = nestedDir.GetParent()
+ }
+ }
+}
+
+func (ui *UI) deviceItemSelected(row, column int) {
+ var err error
+ selectedDevice, ok := ui.table.GetCell(row, column).GetReference().(*device.Device)
+ if !ok {
+ return
+ }
+
+ paths := device.GetNestedMountpointsPaths(selectedDevice.MountPoint, ui.devices)
+ ui.IgnoreDirPathPatterns, err = common.CreateIgnorePattern(paths)
+ if err != nil {
+ log.Printf("Creating path patterns for other devices failed: %s", paths)
+ }
+
+ ui.resetSorting()
+
+ ui.Analyzer.ResetProgress()
+ ui.linkedItems = make(fs.HardLinkedItems)
+ err = ui.AnalyzePath(selectedDevice.MountPoint, nil)
+ if err != nil {
+ ui.showErr("Error analyzing device", err)
+ }
+}
+
+func (ui *UI) confirmDeletion(shouldEmpty bool) {
+ if ui.noDelete {
+ previousHeaderText := ui.header.GetText(false)
+
+ // show feedback to user
+ ui.header.SetText(" Deletion is disabled!")
+
+ go func() {
+ time.Sleep(2 * time.Second)
+ ui.app.QueueUpdateDraw(func() {
+ ui.header.Clear()
+ ui.header.SetText(previousHeaderText)
+ })
+ }()
+
+ return
+ }
+
+ // Check if deletion is allowed with active time filters
+ if ui.noDeleteWithFilter {
+ modal := tview.NewModal().
+ SetText("Deletion is disabled when a time filter is active.\n\n" +
+ "To override, set GDU_ALLOW_DELETE_WITH_FILTER=1").
+ AddButtons([]string{"OK"}).
+ SetDoneFunc(func(buttonIndex int, buttonLabel string) {
+ ui.pages.RemovePage("confirm")
+ })
+ if !ui.UseColors {
+ modal.SetBackgroundColor(tcell.ColorGray)
+ }
+ ui.pages.AddPage("confirm", modal, true, true)
+ return
+ }
+
+ if len(ui.markedRows) > 0 {
+ ui.confirmDeletionMarked(shouldEmpty)
+ } else {
+ ui.confirmDeletionSelected(shouldEmpty)
+ }
+}
+
+func (ui *UI) confirmDeletionSelected(shouldEmpty bool) {
+ row, column := ui.table.GetSelection()
+ selectedFile := ui.table.GetCell(row, column).GetReference().(fs.Item)
+ var action string
+ if shouldEmpty {
+ action = "empty"
+ } else {
+ action = "delete"
+ }
+ modal := tview.NewModal().
+ SetText(
+ "Are you sure you want to " +
+ action +
+ " \"" +
+ tview.Escape(selectedFile.GetName()) +
+ "\"?",
+ ).
+ AddButtons([]string{"no", "yes", "don't ask me again"}).
+ SetDoneFunc(func(buttonIndex int, buttonLabel string) {
+ switch buttonIndex {
+ case 2:
+ ui.askBeforeDelete = false
+ fallthrough
+ case 1:
+ ui.deleteSelected(shouldEmpty)
+ }
+ ui.pages.RemovePage("confirm")
+ })
+
+ if !ui.UseColors {
+ modal.SetBackgroundColor(tcell.ColorGray)
+ } else {
+ modal.SetBackgroundColor(tcell.ColorBlack)
+ }
+ modal.SetBorderColor(tcell.ColorDefault)
+
+ ui.pages.AddPage("confirm", modal, true, true)
+}
+
+// SetTimeFilterWithInfo sets both the time filter function and stores the filter info for display
+func (ui *UI) SetTimeFilterWithInfo(tf *timefilter.TimeFilter, loc *time.Location) {
+ ui.timeFilter = tf
+ ui.timeFilterLoc = loc
+
+ if tf != nil && !tf.IsEmpty() {
+ timeFilterFunc := func(mtime time.Time) bool {
+ return tf.IncludeByTimeFilter(mtime, loc)
+ }
+ ui.SetTimeFilter(timeFilterFunc)
+ if !ui.isDeleteAllowedWithFilter() {
+ ui.SetNoDeleteWithFilter()
+ }
+ }
+}
+
+// hasActiveTimeFilter returns true if any time filter is active
+func (ui *UI) hasActiveTimeFilter() bool {
+ return ui.timeFilter != nil && !ui.timeFilter.IsEmpty()
+}
+
+// formatTimeFilterInfo formats the time filter information for display
+func (ui *UI) formatTimeFilterInfo() string {
+ if !ui.hasActiveTimeFilter() {
+ return ""
+ }
+
+ return ui.timeFilter.FormatForDisplay(ui.timeFilterLoc)
+}
+
+// isDeleteAllowedWithFilter checks if deletion is allowed when filters are active
+func (ui *UI) isDeleteAllowedWithFilter() bool {
+ if !ui.hasActiveTimeFilter() {
+ return true
+ }
+
+ // Check environment variable override
+ if os.Getenv("GDU_ALLOW_DELETE_WITH_FILTER") == "1" {
+ return true
+ }
+
+ return false
+}
--- /dev/null
+package tui
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ log "github.com/sirupsen/logrus"
+
+ "github.com/dundee/gdu/v5/internal/testanalyze"
+ "github.com/dundee/gdu/v5/internal/testapp"
+ "github.com/dundee/gdu/v5/internal/testdev"
+ "github.com/dundee/gdu/v5/internal/testdir"
+ "github.com/dundee/gdu/v5/pkg/analyze"
+ "github.com/dundee/gdu/v5/pkg/device"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/gdamore/tcell/v2"
+ "github.com/stretchr/testify/assert"
+)
+
+func init() {
+ log.SetLevel(log.WarnLevel)
+}
+
+func TestFooter(t *testing.T) {
+ app, simScreen := testapp.CreateTestAppWithSimScreen(15, 15)
+ defer simScreen.Fini()
+
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+
+ dir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "xxx",
+ Size: 5,
+ Usage: 4096,
+ },
+ BasePath: ".",
+ ItemCount: 2,
+ }
+
+ file := &analyze.File{
+ Name: "yyy",
+ Size: 2,
+ Usage: 4096,
+ Parent: dir,
+ }
+ dir.Files = fs.Files{file}
+
+ ui.currentDir = dir
+ ui.showDir()
+ ui.pages.HidePage("progress")
+
+ ui.footerLabel.Draw(simScreen)
+ simScreen.Show()
+
+ b, _, _ := simScreen.GetContents()
+
+ // printScreen(simScreen)
+
+ text := []byte(" Total disk usage: 4.0 KiB Apparent size: 2 B Items: 1")
+ for i, r := range b {
+ if i >= len(text) {
+ break
+ }
+ assert.Equal(t, string(text[i]), string(r.Bytes[0]), fmt.Sprintf("Index: %d", i))
+ }
+}
+
+func TestUpdateProgress(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, false, false, false)
+ done := ui.Analyzer.GetDone()
+ done.Broadcast()
+ ui.updateProgress()
+ assert.True(t, true)
+}
+
+func TestHelp(t *testing.T) {
+ app, simScreen := testapp.CreateTestAppWithSimScreen(50, 50)
+ defer simScreen.Fini()
+
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+ ui.showHelp()
+
+ assert.True(t, ui.pages.HasPage("help"))
+
+ ui.help.Draw(simScreen)
+ simScreen.Show()
+
+ // printScreen(simScreen)
+
+ b, _, _ := simScreen.GetContents()
+
+ cells := b[557 : 557+9]
+
+ text := []byte("directory")
+ for i, r := range cells {
+ assert.Equal(t, text[i], r.Bytes[0])
+ }
+}
+
+func TestHelpBw(t *testing.T) {
+ app, simScreen := testapp.CreateTestAppWithSimScreen(50, 50)
+ defer simScreen.Fini()
+
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.showHelp()
+ ui.help.Draw(simScreen)
+ simScreen.Show()
+
+ // printScreen(simScreen)
+
+ b, _, _ := simScreen.GetContents()
+
+ cells := b[557 : 557+9]
+
+ text := []byte("directory")
+ for i, r := range cells {
+ assert.Equal(t, text[i], r.Bytes[0])
+ }
+}
+
+func TestAppRun(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(false)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+
+ err := ui.StartUILoop()
+
+ assert.Nil(t, err)
+}
+
+func TestAppRunWithErr(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+
+ err := ui.StartUILoop()
+
+ assert.Equal(t, "Fail", err.Error())
+}
+
+func TestRescanDir(t *testing.T) {
+ parentDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "parent",
+ },
+ Files: make([]fs.Item, 0, 1),
+ }
+ currentDir := &analyze.Dir{
+ File: &analyze.File{
+ Name: "sub",
+ Parent: parentDir,
+ },
+ }
+
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+ ui.done = make(chan struct{})
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ ui.currentDir = currentDir
+ ui.topDir = parentDir
+ ui.rescanDir()
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+ assert.Equal(t, parentDir, ui.currentDir.GetParent())
+
+ assert.Equal(t, 5, ui.table.GetRowCount())
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "/..")
+ assert.Contains(t, ui.table.GetCell(1, 0).Text, "ccc")
+}
+
+func TestDirSelected(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, true, false, false)
+ ui.done = make(chan struct{})
+
+ ui.fileItemSelected(0, 0)
+
+ assert.Equal(t, 3, ui.table.GetRowCount())
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "/..")
+ assert.Contains(t, ui.table.GetCell(1, 0).Text, "subnested")
+}
+
+func TestFileSelected(t *testing.T) {
+ ui := getAnalyzedPathMockedApp(t, true, true, true)
+
+ ui.fileItemSelected(3, 0)
+
+ assert.Equal(t, 4, ui.table.GetRowCount())
+ assert.Contains(t, ui.table.GetCell(0, 0).Text, "ccc")
+}
+
+func TestSelectedWithoutCurrentDir(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+
+ ui.fileItemSelected(1, 0)
+
+ assert.Nil(t, ui.currentDir)
+}
+
+func TestBeforeDraw(t *testing.T) {
+ screen := tcell.NewSimulationScreen("UTF-8")
+ err := screen.Init()
+
+ assert.Nil(t, err)
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, screen, &bytes.Buffer{}, false, true, false, false)
+
+ for _, f := range ui.app.(*testapp.MockedApp).BeforeDraws {
+ assert.False(t, f(screen))
+ }
+}
+
+func TestIgnorePaths(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+
+ ui.SetIgnoreDirPaths([]string{"/aaa", "/bbb"})
+
+ assert.True(t, ui.ShouldDirBeIgnored("aaa", "/aaa"))
+ assert.True(t, ui.ShouldDirBeIgnored("bbb", "/bbb"))
+ assert.False(t, ui.ShouldDirBeIgnored("ccc", "/ccc"))
+}
+
+func TestConfirmDeletion(t *testing.T) {
+ ui := getAnalyzedPathMockedApp(t, true, true, true)
+
+ ui.table.Select(1, 0)
+ ui.confirmDeletion(false)
+
+ assert.True(t, ui.pages.HasPage("confirm"))
+}
+
+func TestConfirmDeletionBW(t *testing.T) {
+ ui := getAnalyzedPathMockedApp(t, false, true, true)
+
+ ui.table.Select(1, 0)
+ ui.confirmDeletion(false)
+
+ assert.True(t, ui.pages.HasPage("confirm"))
+}
+
+func TestConfirmEmpty(t *testing.T) {
+ ui := getAnalyzedPathMockedApp(t, false, true, true)
+
+ ui.table.Select(1, 0)
+ ui.confirmDeletion(true)
+
+ assert.True(t, ui.pages.HasPage("confirm"))
+}
+
+func TestConfirmEmptyMarked(t *testing.T) {
+ ui := getAnalyzedPathMockedApp(t, false, true, true)
+
+ ui.table.Select(1, 0)
+ ui.markedRows[1] = struct{}{}
+ ui.confirmDeletion(true)
+
+ assert.True(t, ui.pages.HasPage("confirm"))
+}
+
+func TestConfirmDeletionMarked(t *testing.T) {
+ ui := getAnalyzedPathMockedApp(t, true, true, true)
+
+ ui.table.Select(1, 0)
+ ui.markedRows[1] = struct{}{}
+ ui.confirmDeletion(false)
+
+ assert.True(t, ui.pages.HasPage("confirm"))
+}
+
+func TestConfirmDeletionMarkedBW(t *testing.T) {
+ ui := getAnalyzedPathMockedApp(t, false, true, true)
+
+ ui.table.Select(1, 0)
+ ui.markedRows[1] = struct{}{}
+ ui.confirmDeletion(false)
+
+ assert.True(t, ui.pages.HasPage("confirm"))
+}
+
+func TestDeleteSelected(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, false, true, false)
+ ui.done = make(chan struct{})
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+
+ ui.deleteSelected(false)
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.NoDirExists(t, "test_dir/nested")
+}
+
+func TestDeleteSelectedInParallel(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, false, true, false)
+ ui.done = make(chan struct{})
+ ui.SetDeleteInParallel()
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+
+ ui.deleteSelected(false)
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.NoDirExists(t, "test_dir/nested")
+}
+
+func TestDeleteSelectedInBackground(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, true, true, false)
+ ui.remover = testanalyze.ItemFromDirWithSleep
+ ui.done = make(chan struct{})
+ ui.SetDeleteInBackground()
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+
+ ui.deleteSelected(false)
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.NoDirExists(t, "test_dir/nested")
+}
+
+func TestDeleteSelectedInBackgroundAndParallel(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, true, true, false)
+ ui.remover = testanalyze.ItemFromDirWithSleep
+ ui.done = make(chan struct{})
+ ui.SetDeleteInBackground()
+ ui.SetDeleteInParallel()
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+
+ ui.deleteSelected(false)
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.NoDirExists(t, "test_dir/nested")
+}
+
+func TestDeleteSelectedInBackgroundBW(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, false, true, false)
+ ui.done = make(chan struct{})
+ ui.SetDeleteInBackground()
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+
+ ui.deleteSelected(false)
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.NoDirExists(t, "test_dir/nested")
+}
+
+func TestEmptyDirInBackground(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, true, true, false)
+ ui.done = make(chan struct{})
+ ui.SetDeleteInBackground()
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+
+ ui.deleteSelected(true)
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.DirExists(t, "test_dir/nested")
+ assert.NoDirExists(t, "test_dir/nested/subnested")
+}
+
+func TestEmptyFileInBackground(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, true, true, false)
+ ui.done = make(chan struct{})
+ ui.SetDeleteInBackground()
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.fileItemSelected(0, 0) // nested
+ ui.table.Select(2, 0)
+
+ ui.deleteSelected(true)
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.DirExists(t, "test_dir/nested")
+ assert.FileExists(t, "test_dir/nested/file2")
+
+ f, err := os.Open("test_dir/nested/file2")
+ assert.Nil(t, err)
+ info, err := f.Stat()
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), info.Size())
+}
+
+func TestDeleteSelectedWithErr(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, false, true, false)
+ ui.remover = testanalyze.ItemFromDirWithErr
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+
+ ui.delete(false)
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.True(t, ui.pages.HasPage("error"))
+ assert.DirExists(t, "test_dir/nested")
+}
+
+func TestDeleteSelectedInBackgroundWithErr(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, false, true, false)
+ ui.SetDeleteInBackground()
+ ui.remover = testanalyze.ItemFromDirWithSleepAndErr
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+
+ ui.delete(false)
+
+ <-ui.done
+
+ // change the status
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ // wait for status to be removed
+ time.Sleep(500 * time.Millisecond)
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.True(t, ui.pages.HasPage("error"))
+ assert.DirExists(t, "test_dir/nested")
+}
+
+func TestDeleteMarkedWithErr(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, false, true, false)
+ ui.remover = testanalyze.ItemFromDirWithErr
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+ ui.markedRows[0] = struct{}{}
+
+ ui.deleteMarked(false)
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.True(t, ui.pages.HasPage("error"))
+ assert.DirExists(t, "test_dir/nested")
+}
+
+func TestDeleteMarkedInBackground(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, false, true, false)
+ ui.SetDeleteInBackground()
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.fileItemSelected(0, 0) // nested
+
+ ui.markedRows[1] = struct{}{} // subnested
+ ui.markedRows[2] = struct{}{} // file2
+
+ ui.deleteMarked(false)
+
+ <-ui.done // wait for deletion of subnested
+ <-ui.done // wait for deletion of file2
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.DirExists(t, "test_dir/nested")
+ assert.NoDirExists(t, "test_dir/nested/subnested")
+ assert.NoFileExists(t, "test_dir/nested/file2")
+}
+
+func TestDeleteMarkedInBackgroundWithStorage(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, false, true, false)
+ ui.SetAnalyzer(analyze.CreateStoredAnalyzer("/tmp/badger"))
+ ui.SetDeleteInBackground()
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.fileItemSelected(0, 0) // nested
+
+ ui.markedRows[1] = struct{}{} // subnested
+ ui.markedRows[2] = struct{}{} // file2
+
+ ui.deleteMarked(false)
+
+ <-ui.done // wait for deletion of subnested
+ <-ui.done // wait for deletion of file2
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.DirExists(t, "test_dir/nested")
+ assert.NoDirExists(t, "test_dir/nested/subnested")
+ assert.NoFileExists(t, "test_dir/nested/file2")
+}
+
+func TestDeleteMarkedInBackgroundWithStorageAndParallel(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, false, true, false)
+ ui.SetAnalyzer(analyze.CreateStoredAnalyzer("/tmp/badger"))
+ ui.SetDeleteInBackground()
+ ui.SetDeleteInParallel()
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.fileItemSelected(0, 0) // nested
+
+ ui.markedRows[1] = struct{}{} // subnested
+ ui.markedRows[2] = struct{}{} // file2
+
+ ui.deleteMarked(false)
+
+ <-ui.done // wait for deletion of subnested
+ <-ui.done // wait for deletion of file2
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.DirExists(t, "test_dir/nested")
+ assert.NoDirExists(t, "test_dir/nested/subnested")
+ assert.NoFileExists(t, "test_dir/nested/file2")
+}
+
+func TestDeleteMarkedInBackgroundWithErr(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, false, true, false)
+ ui.SetDeleteInBackground()
+ ui.remover = testanalyze.ItemFromDirWithErr
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+
+ ui.table.Select(0, 0)
+ ui.markedRows[0] = struct{}{}
+
+ ui.deleteMarked(false)
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.True(t, ui.pages.HasPage("error"))
+ assert.DirExists(t, "test_dir/nested")
+}
+
+func TestShowErr(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, true, true, false, false)
+
+ ui.showErr("Something went wrong", errors.New("error"))
+
+ assert.True(t, ui.pages.HasPage("error"))
+}
+
+func TestShowErrBW(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+
+ ui.showErr("Something went wrong", errors.New("error"))
+
+ assert.True(t, ui.pages.HasPage("error"))
+}
+
+func TestMin(t *testing.T) {
+ assert.Equal(t, 2, min(2, 5))
+ assert.Equal(t, 3, min(4, 3))
+}
+
+func TestSetStyles(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ opts := []Option{}
+ opts = append(opts, func(ui *UI) {
+ ui.SetHeaderHidden()
+ })
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false, opts...)
+
+ ui.SetSelectedBackgroundColor(tcell.ColorRed)
+ ui.SetSelectedTextColor(tcell.ColorRed)
+ ui.SetFooterTextColor("red")
+ ui.SetFooterBackgroundColor("red")
+ ui.SetFooterNumberColor("red")
+ ui.SetHeaderTextColor("red")
+ ui.SetHeaderBackgroundColor("red")
+ ui.SetResultRowDirectoryColor("red")
+ ui.SetResultRowNumberColor("red")
+
+ assert.Equal(t, ui.selectedBackgroundColor, tcell.ColorRed)
+ assert.Equal(t, ui.selectedTextColor, tcell.ColorRed)
+ assert.Equal(t, ui.footerTextColor, "red")
+ assert.Equal(t, ui.footerBackgroundColor, "red")
+ assert.Equal(t, ui.footerNumberColor, "red")
+ assert.Equal(t, ui.headerTextColor, "red")
+ assert.Equal(t, ui.headerBackgroundColor, "red")
+ assert.Equal(t, ui.headerHidden, true)
+ assert.Equal(t, ui.resultRow.DirectoryColor, "red")
+ assert.Equal(t, ui.resultRow.NumberColor, "red")
+}
+
+func TestSetCurrentItemNameMaxLen(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+
+ ui.SetCurrentItemNameMaxLen(5)
+
+ assert.Equal(t, ui.currentItemNameMaxLen, 5)
+}
+
+func TestUseOldSizeBar(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+
+ ui.UseOldSizeBar()
+
+ assert.Equal(t, ui.useOldSizeBar, true)
+}
+
+func TestSetShowItemCount(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+
+ ui.SetShowItemCount()
+
+ assert.Equal(t, ui.showItemCount, true)
+}
+
+func TestSetShowMTime(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+
+ ui.SetShowMTime()
+
+ assert.Equal(t, ui.showMtime, true)
+}
+
+func TestNoDelete(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+
+ ui.SetNoDelete()
+
+ assert.Equal(t, ui.noDelete, true)
+}
+
+func TestNoSpawnShell(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+
+ ui.SetNoSpawnShell()
+
+ assert.Equal(t, ui.noSpawnShell, true)
+}
+
+func TestNoViewFile(t *testing.T) {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, false, true, false, false)
+
+ ui.SetNoViewFile()
+
+ assert.Equal(t, ui.noViewFile, true)
+}
+
+// nolint: unused // Why: for debugging
+func printScreen(simScreen tcell.SimulationScreen) {
+ b, _, _ := simScreen.GetContents()
+
+ for i, r := range b {
+ if string(r.Bytes) != " " {
+ println(i, string(r.Bytes))
+ }
+ }
+}
+
+func getDevicesInfoMock() device.DevicesInfoGetter {
+ item := &device.Device{
+ Name: "/dev/root",
+ MountPoint: "test_dir",
+ Size: 1e12,
+ Free: 1e6,
+ }
+ item2 := &device.Device{
+ Name: "/dev/boot",
+ MountPoint: "/boot",
+ Size: 1e6,
+ Free: 1e3,
+ }
+
+ mock := testdev.DevicesInfoGetterMock{}
+ mock.Devices = []*device.Device{item, item2}
+ return mock
+}
+
+func getAnalyzedPathMockedApp(t *testing.T, useColors, apparentSize, mockedAnalyzer bool) *UI {
+ simScreen := testapp.CreateSimScreen()
+ defer simScreen.Fini()
+
+ app := testapp.CreateMockedApp(true)
+ ui := CreateUI(app, simScreen, &bytes.Buffer{}, useColors, apparentSize, false, false)
+
+ if mockedAnalyzer {
+ ui.Analyzer = &testanalyze.MockedAnalyzer{}
+ }
+ ui.done = make(chan struct{})
+ err := ui.AnalyzePath("test_dir", nil)
+ assert.Nil(t, err)
+
+ <-ui.done // wait for analyzer
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.Equal(t, "test_dir", ui.currentDir.GetName())
+
+ return ui
+}
+
+func TestConfirmDeletionSelectedButtonOrder(t *testing.T) {
+ ui := getAnalyzedPathMockedApp(t, true, true, true)
+
+ ui.table.Select(1, 0)
+ ui.confirmDeletionSelected(false)
+
+ // Verify confirmation page is created
+ assert.True(t, ui.pages.HasPage("confirm"))
+}
+
+func TestConfirmDeletionSelectedSafeDefault(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, false, true, false)
+ ui.done = make(chan struct{})
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+ ui.table.Select(0, 0)
+
+ // Create confirmation dialog
+ ui.confirmDeletionSelected(false)
+
+ // Verify that the confirmation dialog exists with safer defaults
+ assert.DirExists(t, "test_dir/nested")
+ assert.True(t, ui.pages.HasPage("confirm"))
+}
+
+func TestConfirmDeletionButtonIndexMapping(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, false, true, false)
+ ui.done = make(chan struct{})
+ ui.askBeforeDelete = false // Skip confirmation for direct testing
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+ ui.table.Select(0, 0)
+
+ // Test that deletion still works when explicitly called
+ ui.deleteSelected(false)
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.NoDirExists(t, "test_dir/nested")
+}
+
+func TestConfirmEmptySelectedSafeDefault(t *testing.T) {
+ ui := getAnalyzedPathMockedApp(t, true, true, true)
+
+ ui.table.Select(1, 0)
+ ui.confirmDeletionSelected(true)
+
+ // Verify empty confirmation dialog is created safely
+ assert.True(t, ui.pages.HasPage("confirm"))
+}
+
+func TestConfirmDeletionMarkedSafeDefault(t *testing.T) {
+ ui := getAnalyzedPathMockedApp(t, true, true, true)
+
+ ui.table.Select(1, 0)
+ ui.markedRows[1] = struct{}{}
+ ui.confirmDeletionMarked(false)
+
+ // Verify marked deletion confirmation dialog is created safely
+ assert.True(t, ui.pages.HasPage("confirm"))
+}
+
+func TestConfirmEmptyMarkedSafeDefault(t *testing.T) {
+ ui := getAnalyzedPathMockedApp(t, false, true, true)
+
+ ui.table.Select(1, 0)
+ ui.markedRows[1] = struct{}{}
+ ui.confirmDeletionMarked(true)
+
+ // Verify marked empty confirmation dialog is created safely
+ assert.True(t, ui.pages.HasPage("confirm"))
+}
+
+func TestSaferConfirmationPreventDataLoss(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, false, true, false)
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+ ui.table.Select(0, 0)
+
+ // Test that creating confirmation dialog doesn't accidentally trigger deletion
+ ui.confirmDeletionSelected(false)
+ ui.confirmDeletionSelected(true) // empty
+
+ // Directory should still exist - no accidental deletion
+ assert.DirExists(t, "test_dir/nested")
+ assert.True(t, ui.pages.HasPage("confirm"))
+}
+
+func TestConfirmDeletionSelectedCase1(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, false, true, false)
+ ui.done = make(chan struct{})
+
+ assert.Equal(t, 1, ui.table.GetRowCount())
+ ui.table.Select(0, 0)
+
+ // Test case 1 branch (yes button at index 1) by directly calling deleteSelected
+ ui.deleteSelected(false)
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.NoDirExists(t, "test_dir/nested")
+}
+
+func TestConfirmDeletionMarkedCase1(t *testing.T) {
+ fin := testdir.CreateTestDir()
+ defer fin()
+
+ ui := getAnalyzedPathMockedApp(t, false, true, false)
+ ui.done = make(chan struct{})
+
+ ui.fileItemSelected(0, 0) // nested
+ ui.markedRows[1] = struct{}{} // subnested
+
+ // Test case 1 branch (yes button at index 1) by directly calling deleteMarked
+ ui.deleteMarked(false)
+
+ <-ui.done
+
+ for _, f := range ui.app.(*testapp.MockedApp).GetUpdateDraws() {
+ f()
+ }
+
+ assert.NoDirExists(t, "test_dir/nested/subnested")
+}
--- /dev/null
+package tui
+
+import (
+ "path/filepath"
+ "slices"
+
+ "github.com/dundee/gdu/v5/pkg/device"
+ "github.com/dundee/gdu/v5/pkg/fs"
+ "github.com/rivo/tview"
+)
+
+var (
+ barFullRune = "\u2588"
+ barPartRunes = map[int]string{
+ 0: " ",
+ 1: "\u258F",
+ 2: "\u258E",
+ 3: "\u258D",
+ 4: "\u258C",
+ 5: "\u258B",
+ 6: "\u258A",
+ 7: "\u2589",
+ }
+)
+
+func getDeviceUsagePart(item *device.Device, useOld bool) string {
+ part := int(float64(item.Size-item.Free) / float64(item.Size) * 100.0)
+ if useOld {
+ return getUsageGraphOld(part)
+ }
+ return getUsageGraph(part)
+}
+
+func getUsageGraph(part int) string {
+ graph := " "
+ whole := part / 10
+ for i := 0; i < whole; i++ {
+ graph += barFullRune
+ }
+ partWidth := (part % 10) * 8 / 10
+ if part < 100 {
+ graph += barPartRunes[partWidth]
+ }
+
+ for i := 0; i < 10-whole-1; i++ {
+ graph += " "
+ }
+
+ graph += "\u258F"
+ return graph
+}
+
+func getUsageGraphOld(part int) string {
+ part /= 10
+ graph := "["
+ for i := 0; i < 10; i++ {
+ if part > i {
+ graph += "#"
+ } else {
+ graph += " "
+ }
+ }
+ graph += "]"
+ return graph
+}
+
+func modal(p tview.Primitive, width, height int) tview.Primitive {
+ return tview.NewFlex().
+ AddItem(nil, 0, 1, false).
+ AddItem(tview.NewFlex().SetDirection(tview.FlexRow).
+ AddItem(nil, 0, 1, false).
+ AddItem(p, height, 1, true).
+ AddItem(nil, 0, 1, false), width, 1, true).
+ AddItem(nil, 0, 1, false)
+}
+
+// CollapsedPath represents a directory chain that can be collapsed into a single display entry.
+// For example, if directory "a" contains only directory "b", and "b" contains only "c",
+// this represents the collapsed path "a/b/c" that allows direct navigation to the deepest directory.
+type CollapsedPath struct {
+ DisplayName string // The display name shown in the UI (e.g., "a/b/c")
+ DeepestDir fs.Item // The actual deepest directory item
+ Segments []string // Individual path segments of the collapsed chain
+}
+
+// findCollapsiblePath checks if the given directory item has a single subdirectory chain
+// and returns a CollapsedPath if it can be collapsed
+func findCollapsiblePath(item fs.Item) *CollapsedPath {
+ if item == nil || !item.IsDir() {
+ return nil
+ }
+
+ var segments []string
+ current := item
+
+ for {
+ // Collect files to check count and types
+ var files []fs.Item
+ for file := range current.GetFiles(fs.SortByName, fs.SortAsc) {
+ files = append(files, file)
+ }
+
+ if len(files) > 1 {
+ break
+ }
+
+ // Count directories and files separately
+ var subdirs []fs.Item
+ var fileCount int
+ for _, file := range files {
+ if file.IsDir() {
+ subdirs = append(subdirs, file)
+ } else {
+ fileCount++
+ }
+ }
+
+ // Only collapse if there's exactly one subdirectory AND no files
+ if len(subdirs) != 1 || fileCount > 0 {
+ break
+ }
+
+ // Add this segment to the path
+ // nolint:staticcheck // the result is used
+ segments = append(segments, subdirs[0].GetName())
+ current = subdirs[0]
+ }
+
+ // Only create collapsed path if we have at least one collapsible segment
+ if len(segments) == 0 {
+ return nil
+ }
+
+ return &CollapsedPath{
+ DisplayName: filepath.Join(slices.Concat([]string{item.GetName()}, segments)...),
+ DeepestDir: current,
+ Segments: segments,
+ }
+}
+
+// findCollapsedParent checks if the current directory is the deepest directory
+// in a collapsed path, and returns the appropriate parent to navigate to
+func findCollapsedParent(currentDir fs.Item) fs.Item {
+ if currentDir == nil {
+ return nil
+ }
+ if currentDir.GetParent() == nil {
+ return nil
+ }
+
+ // Check if current directory is part of a single-child chain going up
+ current := currentDir
+ var chainParent fs.Item
+
+ // Walk up the parent chain
+ for current.GetParent() != nil {
+ parent := current.GetParent()
+
+ // Count files in parent
+ fileCount := 0
+ for range parent.GetFiles(fs.SortByName, fs.SortAsc) {
+ fileCount++
+ if fileCount > 1 {
+ break
+ }
+ }
+
+ // If parent has more than one item, this is where the collapsed chain starts
+ if fileCount > 1 {
+ chainParent = parent
+ break
+ }
+
+ // Move up the chain
+ current = parent
+ }
+
+ // If we found a chain parent (meaning current dir is part of a collapsed path),
+ // return it, otherwise return the normal parent
+ if chainParent != nil {
+ return chainParent
+ }
+
+ return currentDir.GetParent()
+}
--- /dev/null
+package tui
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestGetUsageGraph(t *testing.T) {
+ assert.Equal(t, " \u258F", getUsageGraph(0))
+ assert.Equal(t, " █ \u258F", getUsageGraph(10))
+ assert.Equal(t, " ██ \u258F", getUsageGraph(20))
+ assert.Equal(t, " ███ \u258F", getUsageGraph(30))
+ assert.Equal(t, " ████ \u258F", getUsageGraph(40))
+ assert.Equal(t, " █████ \u258F", getUsageGraph(50))
+ assert.Equal(t, " ██████ \u258F", getUsageGraph(60))
+ assert.Equal(t, " ███████ \u258F", getUsageGraph(70))
+ assert.Equal(t, " ████████ \u258F", getUsageGraph(80))
+ assert.Equal(t, " █████████ \u258F", getUsageGraph(90))
+ assert.Equal(t, " ██████████\u258F", getUsageGraph(100))
+
+ assert.Equal(t, " █ \u258F", getUsageGraph(11))
+ assert.Equal(t, " █▏ \u258F", getUsageGraph(12))
+ assert.Equal(t, " █▎ \u258F", getUsageGraph(13))
+ assert.Equal(t, " █▍ \u258F", getUsageGraph(14))
+ assert.Equal(t, " █▌ \u258F", getUsageGraph(15))
+ assert.Equal(t, " █▌ \u258F", getUsageGraph(16))
+ assert.Equal(t, " █▋ \u258F", getUsageGraph(17))
+ assert.Equal(t, " █▊ \u258F", getUsageGraph(18))
+ assert.Equal(t, " █▉ \u258F", getUsageGraph(19))
+}